michailroussos committed · e8ace7a
Parent: 80bc875
Commit message: more
app.py CHANGED
@@ -19,14 +19,16 @@ FastLanguageModel.for_inference(model) # Enable optimized inference
 
 # Define the response function
 def respond(message, history, system_message, max_tokens, temperature, top_p):
-    # Combine system and
-    messages = [{"role": "system", "content": system_message}]
-
-
-
+    # Combine system message and conversation history
+    messages = [{"role": "system", "content": system_message}]
+    for user_msg, assistant_msg in history:
+        if user_msg:
+            messages.append({"role": "user", "content": user_msg})
+        if assistant_msg:
+            messages.append({"role": "assistant", "content": assistant_msg})
     messages.append({"role": "user", "content": message})
 
-    #
+    # Tokenize inputs
     inputs = tokenizer.apply_chat_template(
         messages,
         tokenize=True,
@@ -34,8 +36,21 @@ def respond(message, history, system_message, max_tokens, temperature, top_p):
         return_tensors="pt",
     ).to("cuda" if torch.cuda.is_available() else "cpu")
 
-    # Use
-
+    # Use TextStreamer to process and yield outputs incrementally
+    class GradioStreamer(TextStreamer):
+        def __init__(self, tokenizer, *args, **kwargs):
+            super().__init__(tokenizer, *args, **kwargs)
+            self.generated_text = ""
+
+        def on_token(self, token_id):
+            token = self.tokenizer.decode(token_id, skip_special_tokens=True)
+            self.generated_text += token
+            yield self.generated_text
+
+    # Initialize Gradio-compatible streamer
+    streamer = GradioStreamer(tokenizer, skip_prompt=True)
+
+    # Generate response with streaming
     _ = model.generate(
         input_ids=inputs,
         max_new_tokens=max_tokens,
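
A note on the streaming change: transformers' TextStreamer does not define an on_token hook (its overridable callbacks are put, end, and on_finalized_text), so the method added here is never invoked during generation, and a yield inside a streamer callback cannot feed Gradio's generator protocol in any case. A minimal sketch of how respond could stream with the stock TextIteratorStreamer instead is below; it assumes the model, tokenizer, and torch import defined earlier in app.py, and the add_generation_prompt=True and do_sample=True arguments are assumptions, since those lines fall outside the visible hunks.

from threading import Thread

from transformers import TextIteratorStreamer

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Build the message list exactly as in the commit above
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,  # assumed; this argument is outside the visible hunk
        return_tensors="pt",
    ).to("cuda" if torch.cuda.is_available() else "cpu")

    # TextIteratorStreamer pushes decoded text onto an internal queue;
    # iterating over the streamer blocks until the next chunk arrives.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    # generate() blocks, so run it on a worker thread and consume the
    # stream here, yielding the growing reply for Gradio to render.
    Thread(target=model.generate, kwargs=dict(
        input_ids=inputs,
        streamer=streamer,
        max_new_tokens=max_tokens,
        do_sample=True,  # assumed; temperature/top_p only take effect when sampling
        temperature=temperature,
        top_p=top_p,
    )).start()

    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial

Because this variant of respond is a generator, gr.ChatInterface can render each yielded string as the in-progress assistant reply, replacing the previous partial output.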
|