Testing Cleanup
app.py
@@ -68,10 +68,14 @@ def chat_llama3_8b(message: str,
         str: The generated response.
     """
     conversation = [{"role": "system", "content": system_prompt}]
+    conversation = []
     for user, assistant in history:
-        conversation.
+        conversation.append({"role": "user", "content": user})
+        conversation.append({"role": "assistant", "content": assistant})
+
     conversation.append({"role": "user", "content": message})
 
+
     input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
     attention_mask = input_ids.ne(tokenizer.pad_token_id).long()
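
A minimal sketch of what the changed loop now produces, runnable on its own. The helper name build_conversation is hypothetical, history is assumed to be a Gradio-style list of (user, assistant) pairs, and the system_prompt default is only a placeholder for whatever app.py defines; tokenizer and model are not needed for this part.

def build_conversation(message, history, system_prompt="You are a helpful assistant."):
    """Assemble the message list in the format expected by apply_chat_template."""
    conversation = [{"role": "system", "content": system_prompt}]
    # The commit immediately resets the list, so the system prompt above is dropped.
    conversation = []
    for user, assistant in history:
        conversation.append({"role": "user", "content": user})
        conversation.append({"role": "assistant", "content": assistant})
    conversation.append({"role": "user", "content": message})
    return conversation

print(build_conversation("And the 70B variant?",
                         [("Which model is this?", "This demo runs Llama 3 8B Instruct.")]))

In app.py the returned list is then tokenized with tokenizer.apply_chat_template(conversation, return_tensors="pt") and moved to model.device, as the unchanged lines of the hunk show. The attention-mask line can be illustrated with a small self-contained example; the pad token id and input ids below are made up, not real Llama 3 values.

import torch

pad_token_id = 0
input_ids = torch.tensor([[0, 0, 15, 42, 7]])
# ne() marks every non-pad position True; long() turns the booleans into 1s and 0s.
attention_mask = input_ids.ne(pad_token_id).long()
print(attention_mask)  # tensor([[0, 0, 1, 1, 1]])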