JeCabrera committed on
Commit
5ef5e82
verified
1 Parent(s): c8fa5c6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +157 -75
app.py CHANGED
@@ -1,87 +1,169 @@
1
  import os
2
  import time
 
 
 
3
  import google.generativeai as genai
4
- from typing import List
5
-
6
# Configure the Gemini API with the key taken from the environment
genai.configure(api_key=os.environ["GEMINI_API_KEY"])

def upload_to_gemini(path, mime_type=None):
    """Upload the file at *path* to Gemini and return its file handle."""
    uploaded = genai.upload_file(path, mime_type=mime_type)
    print(f"Uploaded file '{uploaded.display_name}' as: {uploaded.uri}")
    return uploaded
14
-
15
def wait_for_files_active(files: List):
    """Poll Gemini until every uploaded file is processed and ACTIVE.

    Raises:
        Exception: if any file ends in a state other than ACTIVE.
    """
    print("Waiting for file processing...")
    names = [f.name for f in files]
    for name in names:
        file = genai.get_file(name)
        # PROCESSING is transient; poll every 10 s until it resolves.
        while file.state.name == "PROCESSING":
            print(".", end="", flush=True)
            time.sleep(10)
            file = genai.get_file(name)
        if file.state.name != "ACTIVE":
            raise Exception(f"File {file.name} failed to process")
    print("...all files ready")
    print()
28
-
29
# Model and generation parameter configuration
generation_config = dict(
    temperature=1,
    top_p=0.95,
    top_k=40,
    max_output_tokens=8192,
    response_mime_type="text/plain",
)

model = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    generation_config=generation_config,
)
42
 
43
def upload_files(files: List[str]) -> List:
    """Upload multiple files (images, PDFs, etc.) to Gemini.

    Args:
        files: Local paths of the files to upload.
    Returns:
        A list of Gemini file handles, one per input path.
    """
    # Explicit extension -> MIME table. Bug fix: .gif was previously
    # uploaded with mime_type "image/png"; it now gets "image/gif".
    mime_by_extension = {
        ".jpg": "image/jpeg",
        ".jpeg": "image/jpeg",
        ".png": "image/png",
        ".gif": "image/gif",
        ".pdf": "application/pdf",
        ".txt": "text/plain",
        ".csv": "text/plain",
    }
    uploaded_files = []
    for file in files:
        file_extension = os.path.splitext(file)[1].lower()
        # None for unknown types: Gemini will try to detect it itself.
        mime_type = mime_by_extension.get(file_extension)
        uploaded_files.append(upload_to_gemini(file, mime_type))
    return uploaded_files
61
-
62
# Files to upload (example entries)
files_to_upload = [
    "Report 2025 - Jan 15th.pdf",  # example PDF file
    "image_example.jpg",           # example image file
    "text_example.txt",            # example text file
]

# Upload the files, then wait until Gemini has processed them all.
uploaded_files = upload_files(files_to_upload)
wait_for_files_active(uploaded_files)

# Start a chat session seeded with the uploaded files.
chat_session = model.start_chat(
    history=[{
        "role": "user",
        "parts": uploaded_files,
    }],
)

# Send a message to the model and print its reply.
response = chat_session.send_message("INSERT_INPUT_HERE")
print(response.text)
 
1
  import os
2
  import time
3
+ import uuid
4
+ from typing import List, Tuple, Optional, Union
5
+
6
  import google.generativeai as genai
7
+ import gradio as gr
8
+ from PIL import Image
9
+ from dotenv import load_dotenv
10
+
11
# Load environment variables from the .env file
load_dotenv()

print("google-generativeai:", genai.__version__)

# The API key must be present; fail fast at import time otherwise.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY is not set in environment variables.")

IMAGE_CACHE_DIRECTORY = "/tmp"
IMAGE_WIDTH = 512
# A chat history is a list of (user_part, bot_reply) pairs; the user part
# is either plain text or a 1-tuple holding a cached file path.
CHAT_HISTORY = List[Tuple[Optional[Union[Tuple[str], str]], Optional[str]]]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
    """Resize *image* to IMAGE_WIDTH, preserving aspect ratio.

    Returns None when given a falsy image, mirroring the implicit
    pass-through of the original implementation.
    """
    if not image:
        return None
    scaled_height = int(image.height * IMAGE_WIDTH / image.width)
    return image.resize((IMAGE_WIDTH, scaled_height))
31
+
32
def cache_pil_image(image: Image.Image) -> str:
    """Persist *image* as a uniquely named JPEG in the cache directory.

    Returns the path of the written file.
    """
    os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
    destination = os.path.join(IMAGE_CACHE_DIRECTORY, f"{uuid.uuid4()}.jpeg")
    image.save(destination, "JPEG")
    return destination
38
+
39
def upload(files: Optional[List[str]], chatbot: CHAT_HISTORY) -> CHAT_HISTORY:
    """Append each uploaded file to the chat history.

    Images are re-encoded via PIL and cached; every other file is copied
    into the cache directory unchanged. Returns the updated history.

    Fixes over the previous version:
    - guards against ``files`` being None (gradio may pass it);
    - gradio file objects do not reliably expose ``.type``, so images are
      detected by file extension instead (previously everything fell into
      the non-image branch);
    - removes ``gr.Image(...).render()``, which is a no-op inside an event
      callback (components can only render during Blocks construction).
    """
    for file in files or []:
        # Sniff the type from the filename; tempfile wrappers carry .name.
        extension = os.path.splitext(getattr(file, "name", str(file)))[1].lower()
        if extension in (".jpg", ".jpeg", ".png", ".gif", ".bmp", ".webp"):
            image = Image.open(file).convert('RGB')
            image_path = cache_pil_image(image)
            chatbot.append(((image_path,), None))
        else:
            # Non-image files are cached as-is.
            file_path = cache_file(file)
            chatbot.append(((file_path,), None))
    return chatbot
58
+
59
def cache_file(file) -> str:
    """Copy an uploaded file into the cache directory under a unique name.

    Accepts either a file-like object (with ``.name`` and ``.read()``) or a
    plain path string. The previous version was annotated ``file: str`` yet
    unconditionally called ``.name``/``.read()``, so a real path crashed.

    Returns the path of the cached copy.
    """
    source_name = os.path.basename(getattr(file, "name", str(file)))
    os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
    file_path = os.path.join(IMAGE_CACHE_DIRECTORY, f"{uuid.uuid4()}_{source_name}")
    if hasattr(file, "read"):
        with open(file_path, 'wb') as f:
            f.write(file.read())
    else:
        # Plain path: stream-copy the source file.
        with open(file, 'rb') as src, open(file_path, 'wb') as f:
            f.write(src.read())
    return file_path
66
 
67
def user(text_prompt: str, chatbot: "CHAT_HISTORY"):
    """Record the user's text as a new history entry and clear the textbox.

    Empty prompts are ignored; the history is returned either way.
    """
    if text_prompt:
        chatbot.append((text_prompt, None))
    return "", chatbot
71
 
72
def bot(
    files: Optional[List[str]],
    model_choice: str,
    chatbot: CHAT_HISTORY
):
    """Stream the model's reply for the latest user turn into the history.

    Yields the updated history repeatedly so gradio renders the reply as it
    grows. Fixes over the previous version:
    - history entries are tuples (``user`` appends tuples), so the old
      ``chatbot[-1][1] = ""`` raised TypeError — the last entry is now
      rebuilt instead of mutated;
    - guards against ``files`` being None and against an empty history;
    - images are detected by extension, since gradio file objects do not
      reliably expose a ``.type`` attribute.
    """
    if not GOOGLE_API_KEY:
        raise ValueError("GOOGLE_API_KEY is not set.")
    if not chatbot:
        return  # nothing to answer yet

    # Configure the API with the key
    genai.configure(api_key=GOOGLE_API_KEY)
    generation_config = genai.types.GenerationConfig(
        temperature=0.7,          # default sampling temperature
        max_output_tokens=8192,   # cap the reply at 8,192 tokens
        top_k=10,
        top_p=0.9
    )

    last_user_part = chatbot[-1][0]
    text_prompt = [last_user_part] if last_user_part and isinstance(last_user_part, str) else []

    # Build the multimodal part list from the uploaded files.
    files_paths = []
    for file in files or []:
        extension = os.path.splitext(getattr(file, "name", str(file)))[1].lower()
        if extension in (".jpg", ".jpeg", ".png", ".gif", ".bmp", ".webp"):
            files_paths.append(preprocess_image(Image.open(file).convert('RGB')))
        else:
            files_paths.append(cache_file(file))

    model = genai.GenerativeModel(model_choice)
    response = model.generate_content(
        text_prompt + files_paths, stream=True, generation_config=generation_config
    )

    reply = ""
    for chunk in response:
        # Emit the chunk in 10-character slices for a typing effect.
        for i in range(0, len(chunk.text), 10):
            reply += chunk.text[i:i + 10]
            chatbot[-1] = (last_user_part, reply)
            time.sleep(0.01)
            yield chatbot
101
+
102
# --- UI components; they are rendered inside the Blocks layout below ---
chatbot_component = gr.Chatbot(
    label='Gemini', bubble_full_width=False, scale=2, height=300
)
text_prompt_component = gr.Textbox(
    placeholder="Message...",
    show_label=False,
    autofocus=True,
    scale=8,
)
upload_button_component = gr.UploadButton(
    label="Upload Files",
    file_count="multiple",
    scale=1,
)
run_button_component = gr.Button(value="Run", variant="primary", scale=1)
model_choice_component = gr.Dropdown(
    choices=["gemini-1.5-flash", "gemini-2.0-flash-exp", "gemini-1.5-pro"],
    value="gemini-1.5-flash",
    label="Select Model",
    scale=2,
)
121
+
122
# Bug fix: TITLE and SUBTITLE were referenced in the layout but never
# defined, so the app crashed with NameError at startup. Placeholder
# header text is provided here; adjust the wording as desired.
TITLE = "<h1 align='center'>Gemini Chat</h1>"
SUBTITLE = "<h3 align='center'>Chat with Google Gemini models</h3>"

# Inputs for the two event stages: record the user turn, then answer it.
user_inputs = [
    text_prompt_component,
    chatbot_component
]

bot_inputs = [
    upload_button_component,
    model_choice_component,
    chatbot_component
]

with gr.Blocks() as demo:
    gr.HTML(TITLE)
    gr.HTML(SUBTITLE)
    with gr.Column():
        chatbot_component.render()
        with gr.Row():
            text_prompt_component.render()
            upload_button_component.render()
            run_button_component.render()
            model_choice_component.render()

    # Run button: push the user's text, then stream the model reply.
    run_button_component.click(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False
    ).then(
        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
    )

    # Pressing Enter in the textbox behaves exactly like the Run button.
    text_prompt_component.submit(
        fn=user,
        inputs=user_inputs,
        outputs=[text_prompt_component, chatbot_component],
        queue=False
    ).then(
        fn=bot, inputs=bot_inputs, outputs=[chatbot_component],
    )

    # File uploads are appended straight into the chat history.
    upload_button_component.upload(
        fn=upload,
        inputs=[upload_button_component, chatbot_component],
        outputs=[chatbot_component],
        queue=False
    )

demo.queue(max_size=99).launch(debug=False, show_error=True)