Locutusque committed on
Commit
55ffc20
·
verified ·
1 Parent(s): 241b5b0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -19,7 +19,7 @@ def generate(
19
  pipe = load_model(model_name)
20
 
21
  # Set tokenize correctly. Otherwise ticking the box breaks it.
22
- if model_name == "Locutusque/TinyMistral-248M-v3":
23
  prompt = user_input
24
  else:
25
  prompt = f"<|im_start|>user\n{user_input}<|im_end|>\n<|im_start|>assistant\n"
@@ -27,7 +27,7 @@ def generate(
27
  temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=1.10)
28
  return outputs[0]["generated_text"]
29
 
30
- model_choices = ["M4-ai/hyperion-medium-preview", "M4-ai/NeuralReyna-Mini-1.8B-v0.2", "aloobun/Cypher-Laser-Mixtral-2x1.8B-v0.1", "Locutusque/NeuralHyperion-Medium-Preview", "aloobun/Cypher-Mini-1.8B", "Locutusque/Hercules-2.0-Qwen1.5-1.8B", "Locutusque/Hercules-2.5-Mistral-7B", "Locutusque/hyperion-small-preview"]
31
  # What at the best options?
32
  g = gr.Interface(
33
  fn=generate,
 
19
  pipe = load_model(model_name)
20
 
21
  # Set tokenize correctly. Otherwise ticking the box breaks it.
22
+ if model_name == "M4-ai/tau-0.5B":
23
  prompt = user_input
24
  else:
25
  prompt = f"<|im_start|>user\n{user_input}<|im_end|>\n<|im_start|>assistant\n"
 
27
  temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=1.10)
28
  return outputs[0]["generated_text"]
29
 
30
+ model_choices = ["M4-ai/hyperion-medium-preview", "M4-ai/NeuralReyna-Mini-1.8B-v0.2", "aloobun/Cypher-Laser-Mixtral-2x1.8B-v0.1", "Locutusque/NeuralHyperion-Medium-Preview", "aloobun/Cypher-Mini-1.8B", "Locutusque/Hercules-2.0-Qwen1.5-1.8B", "Locutusque/Hercules-2.5-Mistral-7B", "M4-ai/tau-0.5B"]
31
  # What at the best options?
32
  g = gr.Interface(
33
  fn=generate,