Locutusque committed (verified)
Commit 2ff2190 · 1 Parent(s): 65232f8

Update app.py

Files changed (1):
  app.py +2 -2
app.py CHANGED

@@ -26,7 +26,7 @@ def generate(
     tokenizer.eos_token = "<|im_end|>"
     print(tokenizer)
     pipe.tokenizer = tokenizer
-    if model_name == "Locutusque/TinyMistral-248M":
+    if model_name == "Locutusque/TinyMistral-248M-v3":
         prompt = " ".join(history) + message
     else:
         prompt = f"<|im_start|>system\n{system}<|im_end|>\n"
@@ -62,7 +62,7 @@ def generate(
         print(f"An error occurred: {e}")
         yield "An error occurred during generation."
 
-model_choices = ["Locutusque/Apollo-2.0-Llama-3.1-8B", "Locutusque/TinyMistral-248M", "Locutusque/Hercules-6.1-Llama-3.1-8B", "Locutusque/DareQwen-2.5-7B", "Locutusque/Hercules-5.0-Index-1.9B", "Locutusque/StockQwen-2.5-7B"]
+model_choices = ["Locutusque/Apollo-2.0-Llama-3.1-8B", "Locutusque/TinyMistral-248M-v3", "Locutusque/Hercules-6.1-Llama-3.1-8B", "Locutusque/DareQwen-2.5-7B", "Locutusque/Hercules-5.0-Index-1.9B", "Locutusque/StockQwen-2.5-7B"]
 # What at the best options?
 g = gr.ChatInterface(
     fn=generate,
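
For context, the renamed model string has to change in both places the diff touches: the special-case check inside generate and the entry in model_choices that backs the Gradio model selector. Updating only one of them would let the TinyMistral selection fall through to the ChatML branch. Below is a minimal sketch of that prompt-selection logic; the variable names (model_name, system, history, message) come from the diff context, while the function name build_prompt and the ChatML turns after the system line are assumptions, since the rest of the branch lies outside the hunk.

# Minimal sketch of the prompt-selection logic this commit touches.
# Names from the diff context: model_name, system, history, message.
# build_prompt and the user/assistant turns are assumptions, not the app's exact code.
def build_prompt(model_name: str, system: str, history: list[str], message: str) -> str:
    if model_name == "Locutusque/TinyMistral-248M-v3":
        # TinyMistral-248M-v3 takes a plain concatenation of prior turns plus the new message.
        return " ".join(history) + message
    # Every other model gets a ChatML-style prompt, opening with the system turn shown in the diff.
    prompt = f"<|im_start|>system\n{system}<|im_end|>\n"
    # How history is folded into this branch is not visible in the hunk, so it is omitted here.
    prompt += f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
    return prompt

Keeping the check and the model_choices entry on the same string means the dropdown value passed to generate matches the special case exactly, which is why the commit edits both lines together.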