michailroussos committed
Commit db497f0 · 1 Parent(s): bafd5e5
more changes
app.py
CHANGED
@@ -19,47 +19,32 @@ FastLanguageModel.for_inference(model) # Enable optimized inference
 
 # Define the response function
 def respond(message, history, system_message, max_tokens, temperature, top_p):
-    messages = [{"role": "system", "content": system_message}]
-
-    # Add the conversation history
-    for user_msg, assistant_msg in history:
-        if user_msg:
-            messages.append({"role": "user", "content": user_msg})
-        if assistant_msg:
-            messages.append({"role": "assistant", "content": assistant_msg})
-
-    # Add the user's current message
+    # Combine system and user inputs
+    messages = [{"role": "system", "content": system_message}] + [
+        {"role": "user", "content": user_msg} if assistant_msg is None else {"role": "assistant", "content": assistant_msg}
+        for user_msg, assistant_msg in history
+    ]
     messages.append({"role": "user", "content": message})
 
-    #
+    # Apply the chat template
     inputs = tokenizer.apply_chat_template(
         messages,
         tokenize=True,
         add_generation_prompt=True,
         return_tensors="pt",
-    )
-
-    output = model.generate(
+    ).to("cuda" if torch.cuda.is_available() else "cpu")
+
+    # Use a TextStreamer for real-time decoding
+    streamer = TextStreamer(tokenizer, skip_prompt=True)
+    model.generate(
         input_ids=inputs,
-
+        streamer=streamer,
         max_new_tokens=max_tokens,
+        use_cache=True,
         temperature=temperature,
         top_p=top_p,
-        pad_token_id=tokenizer.eos_token_id,  # Ensure padding is replaced with EOS
     )
-    print("output")
-    print(output)
-    # Decode the generated output
-    response = tokenizer.decode(
-        output[0], skip_special_tokens=True
-    ).strip()  # Remove any extra whitespace or unexpected tokens
-
-    # Yield the clean response for display
-    yield response
 
 # Define the Gradio interface
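The new `respond` body relies on `torch` and `transformers.TextStreamer` being imported near the top of app.py. Two things are worth noting about this version: the history comprehension keeps only one message per `(user_msg, assistant_msg)` pair (the assistant's, once it exists), and `TextStreamer` prints tokens to stdout rather than returning them, so the function no longer yields anything for Gradio to display. A common pattern for streaming into a Gradio `ChatInterface` instead uses `TextIteratorStreamer` with generation running on a background thread. The following is a minimal sketch of that pattern, not what this commit does; it assumes the `model` and `tokenizer` globals loaded at the top of the file and replays both sides of every history pair:

```python
from threading import Thread

import torch
from transformers import TextIteratorStreamer

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Replay both turns of every history pair (the committed comprehension
    # keeps only the assistant turn once a reply exists).
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to("cuda" if torch.cuda.is_available() else "cpu")

    # TextIteratorStreamer exposes decoded chunks as an iterator instead of
    # printing them, so generation must run on a separate thread while this
    # function consumes the stream.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    Thread(
        target=model.generate,
        kwargs=dict(
            input_ids=inputs,
            streamer=streamer,
            max_new_tokens=max_tokens,
            use_cache=True,
            temperature=temperature,
            top_p=top_p,
        ),
    ).start()

    # Re-yield the growing response so Gradio can render it incrementally.
    response = ""
    for chunk in streamer:
        response += chunk
        yield response
```

With this shape, the `gr.ChatInterface` wiring defined below should need no changes, since Gradio treats a generator that yields successively longer strings as a streaming response.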