Update app.py
app.py
CHANGED
@@ -6,7 +6,6 @@ from PIL import Image
 import google.generativeai as genai
 import gradio as gr
 from dotenv import load_dotenv
-import base64
 
 # Load environment variables from the .env file
 load_dotenv()
@@ -30,64 +29,50 @@ model = genai.GenerativeModel(
     generation_config=generation_config,
 )
 
-# Helper to convert the Gradio chat history into the format expected by Gemini
-def transform_history(history):
-    new_history = []
-    for chat in history:
-        new_history.append({"parts": [{"text": chat[0]}], "role": "user"})
-        if chat[1]:  # Model response
-            new_history.append({"parts": [{"text": chat[1]}], "role": "model"})
-    return new_history
+# Constants for image handling
+IMAGE_CACHE_DIRECTORY = "/tmp"
+IMAGE_WIDTH = 512
+CHAT_HISTORY = List[Tuple[Optional[Union[Tuple[str], str]], Optional[str]]]
+
+# Function to preprocess an image
+def preprocess_image(image: Image.Image) -> Optional[Image.Image]:
+    """Resize an image while keeping its aspect ratio."""
+    if image:
+        image_height = int(image.height * IMAGE_WIDTH / image.width)
+        return image.resize((IMAGE_WIDTH, image_height))
+
+# Function to cache an image
+def cache_pil_image(image: Image.Image) -> str:
+    """Save the image as a JPEG file in a temporary directory."""
+    image_filename = f"{uuid.uuid4()}.jpeg"
+    os.makedirs(IMAGE_CACHE_DIRECTORY, exist_ok=True)
+    image_path = os.path.join(IMAGE_CACHE_DIRECTORY, image_filename)
+    image.save(image_path, "JPEG")
+    return image_path
 
 # Main function to handle chat responses
 def response(message, history):
     """Handle the multimodal interaction and send text and images to the model."""
-    global chat
-
-    # Transform the history into the format expected by Gemini
-    chat.history = transform_history(history)
-
-    # Get the message text and the uploaded images
     text_prompt = message["text"]
     files = message["files"]
-
+
     # Process the uploaded images
     image_prompts = []
     if files:
         for file in files:
             image = Image.open(file).convert('RGB')
-    response = chat.send_message(text_prompt)
-
-    # Generate the response character by character for a smoother experience
-    for i in range(len(response.text)):
-        time.sleep(0.01)
-        yield response.text[: i + 1]
+            image_preview = preprocess_image(image)
+            if image_preview:
+                cache_pil_image(image)
+                image_prompts.append(image_preview)  # Include it in the prompt list
+
+    # Concatenate text and images for the model
+    prompts = [text_prompt] + image_prompts
+    response = model.generate_content(prompts, stream=True, generation_config=generation_config)
+
+    # Generate the response step by step
+    for chunk in response:
+        yield chunk.text
 
 # Create the user interface
 demo = gr.ChatInterface(
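The added helpers reference uuid, os, and the typing names used by the CHAT_HISTORY alias, none of which appear in the visible import hunk; presumably app.py already imports them further up the file. A minimal sketch of the imports the new code depends on (assumed, not shown in this diff):

    import os
    import uuid
    from typing import List, Optional, Tuple, Union  # needed for the CHAT_HISTORY alias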
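One caveat about the new streaming loop: gr.ChatInterface treats each yielded value as the complete message so far and replaces the displayed text with it, so yielding bare chunk.text shows only the most recent chunk. A common pattern, sketched here rather than taken from this commit, accumulates the chunks before yielding:

    full_text = ""
    for chunk in response:
        full_text += chunk.text
        yield full_text  # each yield replaces the displayed message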
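The diff cuts off at the gr.ChatInterface( call. As a rough sketch of how a handler like response is typically wired up, assuming the Space uses Gradio's multimodal mode (multimodal=True is what makes Gradio pass {"text": ..., "files": [...]} dicts matching message["text"] / message["files"]; the title is hypothetical):

    demo = gr.ChatInterface(
        fn=response,
        multimodal=True,   # assumed: enables the {"text": ..., "files": [...]} message format
        title="Gemini multimodal chat",  # hypothetical, not from the diff
    )

    if __name__ == "__main__":
        demo.launch()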