NicoNico committed
Commit e7b4310 · verified · Parent: 2677e94

Update README.md


Apply the chat template to the reasoning model example

Files changed (1): README.md (+17, -7)
README.md CHANGED
@@ -1,9 +1,9 @@
- ---
- license: apache-2.0
- tags:
- - mlx
- base_model: GreenBitAI/DeepSeek-R1-Distill-Qwen-1.5B-layer-mix-bpw-4.0
- ---
+ ---
+ license: apache-2.0
+ tags:
+ - mlx
+ base_model: GreenBitAI/DeepSeek-R1-Distill-Qwen-1.5B-layer-mix-bpw-4.0
+ ---

  # GreenBitAI/DeepSeek-R1-Distill-Qwen-1.5B-layer-mix-bpw-4.0-mlx

@@ -20,5 +20,15 @@ pip install gbx-lm
  from gbx_lm import load, generate

  model, tokenizer = load("GreenBitAI/DeepSeek-R1-Distill-Qwen-1.5B-layer-mix-bpw-4.0-mlx")
- response = generate(model, tokenizer, prompt="hello", verbose=True)
+
+ prompt = "How can I make an apple cake"
+
+ if tokenizer.chat_template is not None:
+     messages = [{"role": "user", "content": prompt}]
+     prompt = tokenizer.apply_chat_template(
+         messages, add_generation_prompt=True
+     )
+     prompt = tokenizer.decode(prompt)
+
+ response = generate(model, tokenizer, prompt=prompt, verbose=True, max_tokens=4096)
  ```
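
Put together, the README's usage example after this commit should read roughly as follows. This is reassembled from the hunks above (the `pip install gbx-lm` step appears in the second hunk header); the comments are added here for clarity and are not part of the committed file.

```python
from gbx_lm import load, generate

model, tokenizer = load("GreenBitAI/DeepSeek-R1-Distill-Qwen-1.5B-layer-mix-bpw-4.0-mlx")

prompt = "How can I make an apple cake"

# Reasoning models such as the DeepSeek-R1 distills expect their chat
# template, so wrap the raw prompt as a user message when one is available.
if tokenizer.chat_template is not None:
    messages = [{"role": "user", "content": prompt}]
    # apply_chat_template returns token ids here, so decode back to a string
    # before passing the prompt to generate.
    prompt = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True
    )
    prompt = tokenizer.decode(prompt)

# The larger max_tokens budget leaves room for the model's reasoning trace.
response = generate(model, tokenizer, prompt=prompt, verbose=True, max_tokens=4096)
```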