Update app.py
app.py CHANGED
@@ -88,25 +88,7 @@ def predict_chat(message: str, history: list):
     # CORRECTED: Check against ctransformers.llm.LLM directly
     if GGUF_AVAILABLE and isinstance(model, LLM):
         print("Using GGUF model generation path.")
-        prompt_input = ""
-        for msg in messages:
-            if msg["role"] == "system":
-                prompt_input += f"{msg['content']}\n"
-            elif msg["role"] == "user":
-                prompt_input += f"User: {msg['content']}\n"
-            elif msg["role"] == "assistant":
-                prompt_input += f"Assistant: {msg['content']}\n"
-        prompt_input += "Assistant:"
-
-        # FIXED: Use the correct ctransformers method - call model() directly for streaming
-        try:
-            for token in model(
-                prompt_input,
-                max_new_tokens=MAX_NEW_TOKENS,
-                temperature=TEMPERATURE,
-                top_k=TOP_K,
-                top_p=TOP_P,
-                do_sample=DO_SAMPLE,
+        prompt_input … do_sample=DO_SAMPLE,
                 repetition_penalty=1.1,
                 stop=["User:", "\nUser", "\n#", "\n##", "<|endoftext|>"],
                 stream=True
@@ -122,7 +104,7 @@ def predict_chat(message: str, history: list):
                 temperature=TEMPERATURE,
                 top_k=TOP_K,
                 top_p=TOP_P,
-                do_sample=DO_SAMPLE,
+                #do_sample=DO_SAMPLE,
                 repetition_penalty=1.1,
                 stop=["User:", "\nUser", "\n#", "\n##", "<|endoftext|>"]
             )
@@ -141,7 +123,7 @@ def predict_chat(message: str, history: list):
                 temperature=TEMPERATURE,
                 top_k=TOP_K,
                 top_p=TOP_P,
-                do_sample=DO_SAMPLE,
+                #do_sample=DO_SAMPLE,
                 pad_token_id=tokenizer.pad_token_id
             )
         generated_text = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True).strip()
@@ -187,4 +169,4 @@ if __name__ == "__main__":
 
     demo.chatbot.value = initial_messages_for_value
 
-    demo.launch()
+    demo.launch()
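The common thread in these hunks is that do_sample is a Hugging Face transformers generate() flag, while the ctransformers text-generation call exposes no such parameter: sampling there is governed directly by temperature, top_k, and top_p. Below is a minimal, self-contained sketch of the GGUF streaming path the diff converges on, assuming ctransformers is installed; the model repo and file names are placeholders, not this Space's actual model.

# A minimal sketch of the GGUF streaming path, under the assumptions above.
from ctransformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",           # placeholder repo
    model_file="tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",  # placeholder file
    model_type="llama",
)

# Flatten the chat history into the plain-text prompt the removed loop built.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt_input = ""
for msg in messages:
    if msg["role"] == "system":
        prompt_input += f"{msg['content']}\n"
    elif msg["role"] == "user":
        prompt_input += f"User: {msg['content']}\n"
    elif msg["role"] == "assistant":
        prompt_input += f"Assistant: {msg['content']}\n"
prompt_input += "Assistant:"

# ctransformers takes no do_sample keyword: sampling is implied by
# temperature/top_k/top_p, which is presumably why the diff comments it out.
for token in model(
    prompt_input,
    max_new_tokens=256,
    temperature=0.7,
    top_k=40,
    top_p=0.95,
    repetition_penalty=1.1,
    stop=["User:", "\nUser"],
    stream=True,
):
    print(token, end="", flush=True)

Note that in the transformers fallback (the hunk with pad_token_id), do_sample is a valid generate() keyword; with it commented out, generate() defaults to greedy decoding, so the TEMPERATURE, TOP_K, and TOP_P settings have no effect on that path.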