Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -26,11 +26,13 @@ def generate(
|
|
26 |
tokenizer.eos_token = "<|im_end|>"
|
27 |
print(tokenizer)
|
28 |
pipe.tokenizer = tokenizer
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
prompt
|
33 |
-
|
|
|
|
|
34 |
|
35 |
streamer = TextIteratorStreamer(pipe.tokenizer, timeout=240.0, skip_prompt=True, skip_special_tokens=True)
|
36 |
generation_kwargs = dict(
|
@@ -60,7 +62,7 @@ def generate(
|
|
60 |
print(f"An error occurred: {e}")
|
61 |
yield "An error occurred during generation."
|
62 |
|
63 |
-
model_choices = ["Locutusque/Apollo-2.0-Llama-3.1-8B", "Locutusque/
|
64 |
# What are the best options?
|
65 |
g = gr.ChatInterface(
|
66 |
fn=generate,
|
|
|
26 |
tokenizer.eos_token = "<|im_end|>"
|
27 |
print(tokenizer)
|
28 |
pipe.tokenizer = tokenizer
|
29 |
+
if model_name == "Locutusque/TinyMistral-248M":
|
30 |
+
prompt = prompt
|
31 |
+
else:
|
32 |
+
prompt = f"<|im_start|>system\n{system}<|im_end|>\n"
|
33 |
+
for (user_turn, assistant_turn) in history:
|
34 |
+
prompt += f"<|im_start|>user\n{user_turn}<|im_end|>\n<|im_start|>assistant\n{assistant_turn}<|im_end|>\n"
|
35 |
+
prompt += f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
|
36 |
|
37 |
streamer = TextIteratorStreamer(pipe.tokenizer, timeout=240.0, skip_prompt=True, skip_special_tokens=True)
|
38 |
generation_kwargs = dict(
|
|
|
62 |
print(f"An error occurred: {e}")
|
63 |
yield "An error occurred during generation."
|
64 |
|
65 |
+
model_choices = ["Locutusque/Apollo-2.0-Llama-3.1-8B", "Locutusque/TinyMistral-248M", "Locutusque/Hercules-6.1-Llama-3.1-8B", "Locutusque/DareQwen-2.5-7B", "Locutusque/Hercules-5.0-Index-1.9B", "Locutusque/StockQwen-2.5-7B"]
|
66 |
# What are the best options?
|
67 |
g = gr.ChatInterface(
|
68 |
fn=generate,
|