import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
# Load the DeepSeekMath model and tokenizer
model_name = "deepseek-ai/deepseek-math-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
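# Reuse the model's saved generation settings; the EOS token doubles as the pad
# token below, since the tokenizer does not ship with a dedicated pad token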
model.generation_config = GenerationConfig.from_pretrained(model_name)
model.generation_config.pad_token_id = model.generation_config.eos_token_id
# Define a math problem that asks for chain-of-thought reasoning
messages = [
{"role": "user", "content": "what is the integral of x^2 from 0 to 2?\nPlease reason step by step, and put your final answer within \\boxed{}."}
]
# Convert the messages into the model's input format
input_tensor = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
# Generate the model's response
outputs = model.generate(input_tensor.to(model.device), max_new_tokens=100)
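# Note: max_new_tokens=100 caps the response length; raise it if the
# step-by-step solution gets truncated before the final \boxed{} answer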
# Decode only the newly generated tokens (skipping the prompt) and print the result
result = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)
print(result)
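# Sanity check: the exact value of the integral is 2^3/3 = 8/3,
# so look for \boxed{8/3} (or an equivalent form) in the output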