JeCabrera committed on
Commit
5306417
·
verified ·
1 Parent(s): 239043a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +85 -65
app.py CHANGED
@@ -2,12 +2,10 @@ import os
2
  import time
3
  import uuid
4
  from typing import List, Tuple, Optional, Union
5
-
6
- from dotenv import load_dotenv
7
  from PIL import Image
8
- import numpy as np
9
  import google.generativeai as genai
10
  import gradio as gr
 
11
 
12
  # Cargar las variables de entorno desde el archivo .env
13
  load_dotenv()
@@ -17,44 +15,47 @@ GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
17
  if not GOOGLE_API_KEY:
18
  raise ValueError("GOOGLE_API_KEY is not set in environment variables.")
19
 
20
- # Configuración de Gemini
21
  genai.configure(api_key=GOOGLE_API_KEY)
22
 
23
- # Parámetros comunes para el modelo
24
- generation_config = {
25
- "temperature": 1,
26
- "top_p": 0.95,
27
- "top_k": 40,
28
- "max_output_tokens": 8192,
29
- "response_mime_type": "text/plain",
30
- }
31
-
32
- # Inicialización del modelo
33
- model = genai.GenerativeModel(model_name="gemini-1.5-flash", generation_config=generation_config)
34
-
35
- # Título y subtítulo
36
  TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
37
  SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""
38
 
39
- # Función para transformar el historial de chat
40
- def transform_history(history: List[Tuple[str, str]]) -> list:
41
- return [
42
- {"parts": [{"text": entry[0]}], "role": "user"} if i % 2 == 0 else {"parts": [{"text": entry[1]}], "role": "model"}
43
- for i, entry in enumerate(history)
44
- ]
45
-
46
- # Código para la pestaña 1: Chat básico con Gemini
47
- def bot_response(model_choice: str, system_instruction: str, text_prompt: str, chatbot: list) -> Tuple[list, str]:
 
 
 
 
 
48
  if not text_prompt.strip():
49
  return chatbot, "Por favor, escribe un mensaje válido."
 
 
 
 
 
 
 
 
 
 
50
  chat = model.start_chat(history=transform_history(chatbot))
51
  chat.system_instruction = system_instruction
 
52
  response = chat.send_message(text_prompt)
53
  response.resolve()
 
54
  chatbot.append((text_prompt, response.text))
55
  return chatbot, ""
56
 
57
- # Código para la pestaña 2: Chat con imágenes y configuración avanzada
58
  IMAGE_CACHE_DIRECTORY = "/tmp"
59
  IMAGE_WIDTH = 512
60
 
@@ -70,7 +71,7 @@ def cache_pil_image(image: Image.Image) -> str:
70
  image.save(image_path, "JPEG")
71
  return image_path
72
 
73
- def upload(files: Optional[List[str]], chatbot: List[Tuple[Optional[Union[Tuple[str], str]], Optional[str]]]) -> list:
74
  for file in files:
75
  image = Image.open(file).convert("RGB")
76
  image_preview = preprocess_image(image)
@@ -81,68 +82,87 @@ def upload(files: Optional[List[str]], chatbot: List[Tuple[Optional[Union[Tuple[
81
  return chatbot
82
 
83
  def advanced_response(
84
- files: Optional[List[str]], model_choice: str, system_instruction: str, chatbot: list
 
 
 
85
  ):
86
  if not files:
87
  return chatbot
88
- chat = model.start_chat(history=transform_history(chatbot))
 
 
 
 
 
 
 
 
 
 
89
  chat.system_instruction = system_instruction
 
90
  images = [cache_pil_image(preprocess_image(Image.open(file))) for file in files]
91
- response = chat.generate_content(images, stream=True, generation_config=generation_config)
 
92
  chatbot[-1][1] = ""
93
  for chunk in response:
94
  chatbot[-1][1] += chunk.text
95
  yield chatbot
96
 
97
- # Construcción de la interfaz
98
  with gr.Blocks() as demo:
99
  gr.HTML(TITLE)
100
  gr.HTML(SUBTITLE)
101
- with gr.Tab("Chat Básico"):
102
- gr.Markdown("### Chat con Gemini Pro")
103
- model_dropdown = gr.Dropdown(
104
  choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
105
  value="gemini-1.5-flash",
106
- label="Selecciona el modelo"
107
  )
108
- chatbot_basic = gr.Chatbot(label="Chat Básico", height=300)
109
- text_input = gr.Textbox(placeholder="Escribe tu mensaje...", show_label=False)
110
- system_instruction = gr.Textbox(
111
- placeholder="Instrucción del sistema...",
 
112
  label="Instrucción del sistema",
113
- value="You are an assistant."
 
114
  )
115
- send_button = gr.Button("Enviar")
116
- send_button.click(
117
- bot_response,
118
- inputs=[model_dropdown, system_instruction, text_input, chatbot_basic],
119
- outputs=[chatbot_basic, text_input]
120
  )
121
- with gr.Tab("Chat Avanzado con Imágenes"):
122
- gr.Markdown("### Interacción con imágenes y Gemini Pro Vision")
123
- model_dropdown_adv = gr.Dropdown(
124
  choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
125
  value="gemini-1.5-flash",
126
- label="Selecciona el modelo"
127
  )
128
- chatbot_advanced = gr.Chatbot(label="Chat Avanzado", height=300)
129
- text_input_adv = gr.Textbox(placeholder="Mensaje o descripción...")
130
- system_instruction_adv = gr.Textbox(
131
- placeholder="Instrucción del sistema...",
132
- label="Instrucción del sistema"
 
 
 
133
  )
134
- upload_button = gr.UploadButton(label="Subir Imágenes", file_types=["image"])
135
- process_button = gr.Button("Procesar")
136
- process_button.click(
137
- advanced_response,
138
- inputs=[upload_button, model_dropdown_adv, system_instruction_adv, chatbot_advanced],
139
- outputs=[chatbot_advanced]
140
  )
141
  upload_button.upload(
142
- upload,
143
- inputs=[upload_button, chatbot_advanced],
144
- outputs=[chatbot_advanced]
145
  )
146
 
147
  if __name__ == "__main__":
148
- demo.launch(debug=True)
 
2
  import time
3
  import uuid
4
  from typing import List, Tuple, Optional, Union
 
 
5
  from PIL import Image
 
6
  import google.generativeai as genai
7
  import gradio as gr
8
+ from dotenv import load_dotenv
9
 
10
  # Cargar las variables de entorno desde el archivo .env
11
  load_dotenv()
 
15
  if not GOOGLE_API_KEY:
16
  raise ValueError("GOOGLE_API_KEY is not set in environment variables.")
17
 
 
18
  genai.configure(api_key=GOOGLE_API_KEY)
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  TITLE = """<h1 align="center">Gemini Playground ✨</h1>"""
21
  SUBTITLE = """<h2 align="center">Play with Gemini Pro and Gemini Pro Vision</h2>"""
22
 
23
# Tab 1: basic text chat with Gemini.
def transform_history(history: List[Tuple[str, str]]) -> list:
    """Convert Gradio chat history into the google.generativeai format.

    Each (user_message, model_reply) pair becomes two consecutive
    entries: a "user" part followed by a "model" part.

    Args:
        history: Gradio chat history as (user, model) text tuples.

    Returns:
        A list of {"parts": [{"text": ...}], "role": ...} dicts.
    """
    new_history = []
    for chat_entry in history:
        new_history.append({"parts": [{"text": chat_entry[0]}], "role": "user"})
        new_history.append({"parts": [{"text": chat_entry[1]}], "role": "model"})
    return new_history
30
+
31
def bot_response(
    model_choice: str,
    system_instruction: str,
    text_prompt: str,
    chatbot: list,
) -> Tuple[list, str]:
    """Send one user message to Gemini and append the reply to the chat.

    Args:
        model_choice: Gemini model name selected in the dropdown.
        system_instruction: System prompt for the conversation.
        text_prompt: The user's message.
        chatbot: Gradio chat history as (user, model) tuples.

    Returns:
        Tuple of (updated chat history, text to place back in the input
        box — an error prompt for blank input, empty string otherwise).
    """
    if not text_prompt.strip():
        return chatbot, "Por favor, escribe un mensaje válido."

    # system_instruction must go to the model constructor; assigning it on
    # the ChatSession afterwards is silently ignored by google.generativeai.
    # An empty string is mapped to None so the API receives no instruction.
    model = genai.GenerativeModel(
        model_name=model_choice,
        generation_config={
            "temperature": 1,
            "top_p": 0.95,
            "top_k": 40,
            "max_output_tokens": 8192,
        },
        system_instruction=system_instruction or None,
    )
    chat = model.start_chat(history=transform_history(chatbot))

    response = chat.send_message(text_prompt)
    response.resolve()

    chatbot.append((text_prompt, response.text))
    return chatbot, ""
57
 
58
+ # Código de la pestaña 2
59
  IMAGE_CACHE_DIRECTORY = "/tmp"
60
  IMAGE_WIDTH = 512
61
 
 
71
  image.save(image_path, "JPEG")
72
  return image_path
73
 
74
+ def upload(files: Optional[List[str]], chatbot: list) -> list:
75
  for file in files:
76
  image = Image.open(file).convert("RGB")
77
  image_preview = preprocess_image(image)
 
82
  return chatbot
83
 
84
def advanced_response(
    files: Optional[List[str]],
    model_choice: str,
    system_instruction: str,
    chatbot: list,
):
    """Stream a Gemini response for uploaded images into the last chat row.

    Yields the chat history after each streamed chunk so Gradio can
    update the UI incrementally.

    Args:
        files: Paths of the uploaded image files (may be None/empty).
        model_choice: Gemini model name selected in the dropdown.
        system_instruction: System prompt for the conversation.
        chatbot: Gradio chat history; its last row receives the reply.
    """
    if not files:
        # This function is a generator, so a plain `return chatbot` would be
        # discarded (its value lands on StopIteration); yield the unchanged
        # history before stopping so the UI is not blanked.
        yield chatbot
        return

    # system_instruction is honored only when passed to the constructor;
    # setting it on the ChatSession afterwards has no effect.
    model = genai.GenerativeModel(
        model_name=model_choice,
        generation_config={
            "temperature": 0.7,
            "max_output_tokens": 8192,
            "top_k": 10,
            "top_p": 0.9,
        },
        system_instruction=system_instruction or None,
    )
    # Reuse the same history transform as the text tab: raw Gradio tuples are
    # not in the format the API expects.
    # NOTE(review): rows appended by upload() may hold image tuples rather
    # than text — confirm they survive transform_history, or filter them.
    chat = model.start_chat(history=transform_history(chatbot))

    # Keep the on-disk cache side effect, but send PIL images to the API:
    # ChatSession has no generate_content(), and file-path strings are not
    # valid content parts.
    images = []
    for file in files:
        image = preprocess_image(Image.open(file))
        cache_pil_image(image)
        images.append(image)
    response = chat.send_message(images, stream=True)

    # Chat rows may be tuples (immutable); rebuild the last row as a list
    # before accumulating streamed text into it.
    chatbot[-1] = [chatbot[-1][0], ""]
    for chunk in response:
        chatbot[-1][1] += chunk.text
        yield chatbot
112
 
113
# Build the interface with the two original tabs.
with gr.Blocks() as demo:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)

    # Tab 1: plain text chat.
    with gr.Tab("Pestaña 1"):
        model_dropdown_1 = gr.Dropdown(
            choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
            value="gemini-1.5-flash",
            label="Selecciona el modelo",
        )
        chatbot_1 = gr.Chatbot(label="Gemini", scale=2, height=300)
        text_input_1 = gr.Textbox(placeholder="Escribe un mensaje...", show_label=False, scale=8)
        run_button_1 = gr.Button(value="Enviar", variant="primary", scale=1)
        system_instruction_1 = gr.Textbox(
            placeholder="Escribe una instrucción para el sistema...",
            label="Instrucción del sistema",
            scale=8,
            value="You are an assistant.",
        )

        run_button_1.click(
            fn=bot_response,
            inputs=[model_dropdown_1, system_instruction_1, text_input_1, chatbot_1],
            outputs=[chatbot_1, text_input_1],
        )

    # Tab 2: image upload + streaming vision chat.
    with gr.Tab("Pestaña 2"):
        model_dropdown_2 = gr.Dropdown(
            choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
            value="gemini-1.5-flash",
            label="Select Model",
        )
        chatbot_2 = gr.Chatbot(label="Gemini", height=300)
        text_input_2 = gr.Textbox(placeholder="Message or description...", show_label=False, scale=8)
        upload_button = gr.UploadButton(label="Upload Images", file_count="multiple", file_types=["image"])
        run_button_2 = gr.Button(value="Run", variant="primary", scale=1)
        system_instruction_2 = gr.Textbox(
            placeholder="Enter system instruction...",
            label="System Instruction",
            scale=8,
        )

        run_button_2.click(
            fn=advanced_response,
            inputs=[upload_button, model_dropdown_2, system_instruction_2, chatbot_2],
            outputs=[chatbot_2],
        )
        upload_button.upload(
            fn=upload,
            inputs=[upload_button, chatbot_2],
            outputs=[chatbot_2],
        )

if __name__ == "__main__":
    # launch() has no `queue` keyword; queuing (required for the streaming
    # generator handler advanced_response) is enabled via demo.queue().
    demo.queue()
    demo.launch(debug=True)