Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -19,7 +19,7 @@ def generate(
     pipe = load_model(model_name)
 
     # Set tokenize correctly. Otherwise ticking the box breaks it.
-    if model_name == "
+    if model_name == "M4-ai/tau-0.5B":
         prompt = user_input
     else:
         prompt = f"<|im_start|>user\n{user_input}<|im_end|>\n<|im_start|>assistant\n"
@@ -27,7 +27,7 @@ def generate(
                    temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=1.10)
     return outputs[0]["generated_text"]
 
-model_choices = ["M4-ai/hyperion-medium-preview", "M4-ai/NeuralReyna-Mini-1.8B-v0.2", "aloobun/Cypher-Laser-Mixtral-2x1.8B-v0.1", "Locutusque/NeuralHyperion-Medium-Preview", "aloobun/Cypher-Mini-1.8B", "Locutusque/Hercules-2.0-Qwen1.5-1.8B", "Locutusque/Hercules-2.5-Mistral-7B", "
+model_choices = ["M4-ai/hyperion-medium-preview", "M4-ai/NeuralReyna-Mini-1.8B-v0.2", "aloobun/Cypher-Laser-Mixtral-2x1.8B-v0.1", "Locutusque/NeuralHyperion-Medium-Preview", "aloobun/Cypher-Mini-1.8B", "Locutusque/Hercules-2.0-Qwen1.5-1.8B", "Locutusque/Hercules-2.5-Mistral-7B", "M4-ai/tau-0.5B"]
 # What at the best options?
 g = gr.Interface(
     fn=generate,
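
For context, a minimal, hypothetical sketch of how the changed lines might sit inside the Space's app.py. Only the lines visible in the diff are taken verbatim; load_model's body, the generate() signature, the sampling defaults, and the Gradio inputs/outputs are assumptions for illustration, not the Space's actual code.

# Hypothetical reconstruction for illustration only: everything outside the
# lines shown in the diff above (load_model's body, the generate() signature,
# the sampling defaults, the Gradio inputs/outputs) is assumed, not taken from app.py.
import gradio as gr
from transformers import pipeline

_pipelines = {}

def load_model(model_name):
    # Assumed helper: build and cache one text-generation pipeline per model.
    if model_name not in _pipelines:
        _pipelines[model_name] = pipeline("text-generation", model=model_name)
    return _pipelines[model_name]

def generate(model_name, user_input, temperature=0.7, top_k=50, top_p=0.95):
    pipe = load_model(model_name)

    # Set tokenize correctly. Otherwise ticking the box breaks it.
    # tau-0.5B gets the raw prompt; the other models get ChatML tags.
    if model_name == "M4-ai/tau-0.5B":
        prompt = user_input
    else:
        prompt = f"<|im_start|>user\n{user_input}<|im_end|>\n<|im_start|>assistant\n"

    outputs = pipe(prompt, max_new_tokens=256, do_sample=True,  # assumed values
                   temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=1.10)
    return outputs[0]["generated_text"]

model_choices = ["M4-ai/hyperion-medium-preview", "M4-ai/NeuralReyna-Mini-1.8B-v0.2",
                 "aloobun/Cypher-Laser-Mixtral-2x1.8B-v0.1", "Locutusque/NeuralHyperion-Medium-Preview",
                 "aloobun/Cypher-Mini-1.8B", "Locutusque/Hercules-2.0-Qwen1.5-1.8B",
                 "Locutusque/Hercules-2.5-Mistral-7B", "M4-ai/tau-0.5B"]

g = gr.Interface(
    fn=generate,
    inputs=[gr.Dropdown(model_choices, label="Model"),
            gr.Textbox(label="Prompt"),
            gr.Slider(0.1, 2.0, value=0.7, label="Temperature"),
            gr.Slider(1, 100, value=50, step=1, label="Top-k"),
            gr.Slider(0.1, 1.0, value=0.95, label="Top-p")],
    outputs=gr.Textbox(label="Generated text"),
)

if __name__ == "__main__":
    g.launch()

The commit both adds "M4-ai/tau-0.5B" to model_choices and names it in the if condition, so that model receives the user text verbatim while every other choice is wrapped in the ChatML <|im_start|>/<|im_end|> template before being passed to the pipeline.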