import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig

# Load the DeepSeekMath model and tokenizer
model_name = "deepseek-ai/deepseek-math-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
model.generation_config = GenerationConfig.from_pretrained(model_name)
model.generation_config.pad_token_id = model.generation_config.eos_token_id

# Define a math problem that asks for chain-of-thought reasoning
messages = [
    {"role": "user", "content": "what is the integral of x^2 from 0 to 2?\nPlease reason step by step, and put your final answer within \\boxed{}."}
]

# ๅฐ†้—ฎ้ข˜่ฝฌๆขไธบๆจกๅž‹่พ“ๅ…ฅๆ ผๅผ
input_tensor = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")

# Generate the model's output
outputs = model.generate(input_tensor.to(model.device), max_new_tokens=100)

# Decode the generated tokens (excluding the prompt) and print the result
result = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)
print(result)
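
# Optional sanity check (a sketch, assuming the model follows the boxed-answer
# instruction in the prompt): the exact value is int_0^2 x^2 dx = x^3/3
# evaluated from 0 to 2 = 8/3, so the decoded result should contain
# \boxed{\frac{8}{3}} or an equivalent form such as 8/3.
if "\\boxed" in result:
    print("Boxed answer found; expected value is 8/3 =", 8 / 3)
else:
    # A full step-by-step solution may not fit in 100 tokens; raising
    # max_new_tokens is a reasonable first fix if the answer is cut off.
    print("No boxed answer found; consider increasing max_new_tokens.")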