Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -33,6 +33,25 @@ model = genai.GenerativeModel(
|
|
33 |
# Inicializar la sesión de chat
|
34 |
chat = model.start_chat(history=[])
|
35 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
36 |
# Constantes y configuraciones
|
37 |
IMAGE_CACHE_DIRECTORY = "/tmp"
|
38 |
IMAGE_WIDTH = 512
|
|
|
33 |
# Inicializar la sesión de chat
|
34 |
chat = model.start_chat(history=[])
|
35 |
|
36 |
+
# Transform the Gradio chat history into the format Gemini expects.
def transform_history(history):
    """Convert Gradio-style chat history to Gemini's history format.

    Args:
        history: Iterable of (user_message, model_message) pairs as kept
            by a Gradio chatbot component.

    Returns:
        A flat list of ``{"parts": [{"text": ...}], "role": ...}`` dicts,
        alternating the ``"user"`` and ``"model"`` roles in turn order.
    """
    new_history = []
    # NOTE(review): the loop variable was renamed from `chat` to `turn` —
    # the original shadowed the module-level `chat` session object that
    # the rest of this file relies on.
    for turn in history:
        new_history.append({"parts": [{"text": turn[0]}], "role": "user"})
        new_history.append({"parts": [{"text": turn[1]}], "role": "model"})
    return new_history
|
43 |
+
|
44 |
+
# Response handler for the chat UI: forwards text input to Gemini.
def response(message, history):
    """Send the user's message to the shared Gemini chat session.

    Args:
        message: Gradio message dict; only its ``"text"`` field is
            forwarded to the model (files, if any, are not sent here).
        history: Gradio chat history; installed on the session so Gemini
            sees the full conversation before replying.
    """
    global chat

    # Rebuild the session history in Gemini's expected format.
    chat.history = transform_history(history)

    # Forward the text portion of the message and block until the
    # reply has fully resolved. (Renamed the local from `response`,
    # which shadowed this function's own name.)
    reply = chat.send_message(message["text"])
    reply.resolve()
|
55 |
# Constantes y configuraciones
|
56 |
IMAGE_CACHE_DIRECTORY = "/tmp"
|
57 |
IMAGE_WIDTH = 512
|