Update app.py
app.py
CHANGED
@@ -30,11 +30,15 @@ def askme(symptoms, question):
     prompt = tokenizer.apply_chat_template(messages, template=custom_template, tokenize=False, add_generation_prompt=True)
     inputs = tokenizer(prompt, return_tensors="pt").to(device)  # Ensure inputs are on CUDA device
     outputs = model.generate(**inputs, max_new_tokens=200, use_cache=True)
-    response_text
+    response_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0].strip()
 
     # Extract only the assistant's response
-
-
+    start_idx = response_text.find("<|im_start|>assistant")
+    end_idx = response_text.find("<|im_end|>", start_idx)
+    assistant_response = response_text[start_idx + len("<|im_start|>assistant"):end_idx]
+    # Return only one answer
+    answers = assistant_response.split(". ")
+    return answers[0] + "."
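For reference, here is a minimal, self-contained sketch of the post-processing this commit adds. It runs against a hard-coded string in place of a real model output (the sample dialogue is made up for illustration), but the "<|im_start|>assistant" / "<|im_end|>" markers and the slicing and splitting steps mirror the added lines above.

# Hypothetical decoded output standing in for tokenizer.batch_decode(...)[0].strip();
# the dialogue text is invented, only the chat-template markers match app.py.
response_text = (
    "<|im_start|>user\nI have a headache and a fever. What could it be?<|im_end|>\n"
    "<|im_start|>assistant\nIt could be a common viral infection. Rest and stay hydrated. "
    "See a doctor if symptoms persist.<|im_end|>"
)

# Slice out the assistant turn between its start marker and the closing marker.
start_idx = response_text.find("<|im_start|>assistant")
end_idx = response_text.find("<|im_end|>", start_idx)
assistant_response = response_text[start_idx + len("<|im_start|>assistant"):end_idx]

# Keep only the first sentence, as the commit does.
answers = assistant_response.split(". ")
print(answers[0] + ".")  # prints the first sentence of the assistant turn (with the template's leading newline)

One caveat with the logic as committed: if either marker is missing from the generated text, str.find returns -1 and the slice silently drops or misaligns characters, so the app may want to guard both find calls before slicing.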