Update app.py
app.py
CHANGED
@@ -308,27 +308,39 @@ def simple_chat(message: dict, temperature: float = 0.8, max_length: int = 4096,
         low_cpu_mem_usage=True,
         trust_remote_code=True
     )
+
+    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
 
     conversation = []
 
-
-
-
+    # Access the file content and the file name
+    if "file_content" in message and message["file_content"]:
+        file_content = message["file_content"]
+        file_name = message["file_name"]
+
+        # Save the file to a temporary file
+        with open(file_name, "wb") as f:
+            f.write(file_content.read())
+
+        # Call `mode_load` with the file name
+        choice, contents = mode_load(file_name)
+
     if choice == "image":
         conversation.append({"role": "user", "image": contents, "content": message['text']})
     elif choice == "doc":
-        format_msg = contents + "\n\n\n" + "{} files uploaded.\n".format(
+        format_msg = contents + "\n\n\n" + "{} files uploaded.\n".format(1) + message['text']
         conversation.append({"role": "user", "content": format_msg})
     else:
-
-
+        # Handle the case where no file is uploaded
+        conversation.append({"role": "user", "content": message['text']})
 
-
-
-        print(" ")
-        print("Conv: ")
+    print("--------------")
+    print(" ")
     print(conversation)
-    print("
+    print(" ")
+    print("--------------")
+
+    input_ids = tokenizer.apply_chat_template(conversation, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True).to(model.device)
 
     generate_kwargs = dict(
         max_length=max_length,