aekpic877 committed on
Commit
4514961
·
verified ·
1 Parent(s): 1366f2a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -18
app.py CHANGED
@@ -1,19 +1,10 @@
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
- import torch
3
- tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-Coder-V2-Lite-Base", trust_remote_code=True)
4
- model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-Coder-V2-Lite-Base", trust_remote_code=True, torch_dtype=torch.bfloat16).cuda()
5
- input_text = """<|fim▁begin|>def quick_sort(arr):
6
- if len(arr) <= 1:
7
- return arr
8
- pivot = arr[0]
9
- left = []
10
- right = []
11
- <|fim▁hole|>
12
- if arr[i] < pivot:
13
- left.append(arr[i])
14
- else:
15
- right.append(arr[i])
16
- return quick_sort(left) + [pivot] + quick_sort(right)<|fim▁end|>"""
17
- inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
18
- outputs = model.generate(**inputs, max_length=128)
19
- print(tokenizer.decode(outputs[0], skip_special_tokens=True)[len(input_text):])
 
"""Run deepseek-coder-1.3b-instruct on a single chat prompt and print the reply.

Loads the tokenizer and model from the Hugging Face Hub, builds a one-turn
chat prompt via the model's chat template, generates greedily, and prints
only the newly generated text (the prompt tokens are sliced off).
"""
import torch  # BUGFIX: the commit dropped this import but torch.bfloat16 is still used below

from transformers import AutoTokenizer, AutoModelForCausalLM

MODEL_ID = "deepseek-ai/deepseek-coder-1.3b-instruct"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
# bfloat16 halves memory vs fp32; .cuda() requires a CUDA-capable GPU.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
).cuda()

messages = [
    {'role': 'user', 'content': "write a quick sort algorithm in python."},
]
# apply_chat_template formats the turn(s) per the model's expected chat markup
# and returns input token ids on the model's device.
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# tokenizer.eos_token_id is the id of <|EOT|> token
outputs = model.generate(
    inputs,
    max_new_tokens=512,
    do_sample=False,  # greedy decoding; top_k/top_p are inert when do_sample=False
    top_k=50,
    top_p=0.95,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
)
# Slice off the prompt tokens so only the model's answer is printed.
print(tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True))