JeCabrera committed
Commit a3258ba · verified · 1 Parent(s): 6a671c6

Update app.py

Files changed (1)
  1. app.py +37 -17
app.py CHANGED
@@ -5,10 +5,10 @@ import os
 import time
 import uuid
 from typing import List, Tuple, Optional, Union
-
 import google.generativeai as genai
 import gradio as gr
 from PIL import Image
+import PyPDF2
 from dotenv import load_dotenv
 
 # Cargar las variables de entorno desde el archivo .env
@@ -39,34 +39,53 @@ def cache_pil_image(image: Image.Image) -> str:
     image.save(image_path, "JPEG")
     return image_path
 
+def cache_file(file) -> str:
+    """Guarda el archivo tal cual en el sistema temporal."""
+    file_path = f"/tmp/{uuid.uuid4()}_{file.name}"
+    with open(file_path, "wb") as f:
+        f.write(file.read())  # Aquí es donde 'file' puede tener el método read()
+    return file_path
+
+def extract_text_from_pdf(pdf_path: str) -> str:
+    """Extrae el texto de un archivo PDF."""
+    with open(pdf_path, 'rb') as f:
+        pdf_reader = PyPDF2.PdfReader(f)
+        text = ""
+        for page in pdf_reader.pages:
+            text += page.extract_text()
+    return text
+
 def upload(files: Optional[List[str]], chatbot: CHAT_HISTORY) -> CHAT_HISTORY:
     for file in files:
-        # Detectar el tipo de archivo y manejarlo adecuadamente
         mime_type = file.type if hasattr(file, 'type') else None
 
         # Si es una imagen, la procesamos con PIL
         if mime_type and mime_type.startswith('image'):
-            image = Image.open(file).convert('RGB')
+            image = Image.open(file.name).convert('RGB')  # Abrir el archivo con su ruta
             image_preview = preprocess_image(image)
             if image_preview:
                 # Mostrar una vista previa de la imagen cargada
                 gr.Image(image_preview).render()
             image_path = cache_pil_image(image)
             chatbot.append(((image_path,), None))
+
+        # Si es un archivo PDF, lo procesamos y extraemos el texto
+        elif mime_type and mime_type == "application/pdf":
+            pdf_content = extract_text_from_pdf(file.name)
+            chatbot.append((pdf_content, None))
+
+        # Si es un archivo de texto, lo procesamos directamente
+        elif mime_type and mime_type == "text/plain":
+            with open(file.name, 'r', encoding='utf-8') as f:
+                text_content = f.read()
+            chatbot.append((text_content, None))
+
         else:
-            # Si no es una imagen, se guarda el archivo tal cual
+            # Si es otro tipo de archivo, se guarda el archivo tal cual
             file_path = cache_file(file)
             chatbot.append(((file_path,), None))
     return chatbot
 
-def cache_file(file: str) -> str:
-    file_filename = f"{uuid.uuid4()}_{os.path.basename(file.name)}"
-    os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
-    file_path = os.path.join(IMAGE_CACHE_DIRECTORY, file_filename)
-    with open(file_path, 'wb') as f:
-        f.write(file.read())
-    return file_path
-
 def user(text_prompt: str, chatbot: CHAT_HISTORY):
     if text_prompt:
         chatbot.append((text_prompt, None))
@@ -90,9 +109,9 @@ def bot(
     )
 
     text_prompt = [chatbot[-1][0]] if chatbot and chatbot[-1][0] and isinstance(chatbot[-1][0], str) else []
-    files_paths = [preprocess_image(Image.open(file).convert('RGB')) if file.type.startswith('image') else cache_file(file) for file in files]
+    image_prompt = [preprocess_image(Image.open(file).convert('RGB')) for file in files if file.type.startswith('image')] if files else []
     model = genai.GenerativeModel(model_choice)
-    response = model.generate_content(text_prompt + files_paths, stream=True, generation_config=generation_config)
+    response = model.generate_content(text_prompt + image_prompt, stream=True, generation_config=generation_config)
 
     chatbot[-1][1] = ""
     for chunk in response:
@@ -112,7 +131,7 @@ text_prompt_component = gr.Textbox(
     placeholder="Message...", show_label=False, autofocus=True, scale=8
 )
 upload_button_component = gr.UploadButton(
-    label="Upload Files", file_count="multiple", scale=1
+    label="Upload Files", file_count="multiple", file_types=["image", "pdf", "text"], scale=1
 )
 run_button_component = gr.Button(value="Run", variant="primary", scale=1)
 model_choice_component = gr.Dropdown(
@@ -134,8 +153,8 @@ bot_inputs = [
 ]
 
 with gr.Blocks() as demo:
-    gr.HTML(TITLE)
-    gr.HTML(SUBTITLE)
+    gr.HTML("<h1 align='center'>Gemini Playground ✨</h1>")
+    gr.HTML("<h2 align='center'>Play with Gemini Pro and Gemini Pro Vision</h2>")
     with gr.Column():
         chatbot_component.render()
         with gr.Row():
@@ -170,3 +189,4 @@ with gr.Blocks() as demo:
     )
 
 demo.queue(max_size=99).launch(debug=False, show_error=True)
+
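
For reference, the new PDF branch in upload() leans on PyPDF2's PdfReader, the same API used by the added extract_text_from_pdf helper. Below is a minimal standalone sketch of that extraction step, runnable outside Gradio; the function name pdf_to_text and the "sample.pdf" path are hypothetical placeholders, not part of this commit.

import PyPDF2  # same dependency this commit adds

def pdf_to_text(pdf_path: str) -> str:
    # Open the PDF in binary mode and concatenate the text of every page.
    # extract_text() may return None for image-only pages, hence the "or ''".
    with open(pdf_path, "rb") as f:
        reader = PyPDF2.PdfReader(f)
        return "".join(page.extract_text() or "" for page in reader.pages)

if __name__ == "__main__":
    print(pdf_to_text("sample.pdf"))  # "sample.pdf" is a placeholder path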