from accelerate import init_empty_weights, load_checkpoint_and_dispatch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer


class EndpointHandler:
    def __init__(self, model_dir: str, **kw):
        self.tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)

        # Build the model skeleton on the meta device so no weight memory is
        # allocated yet. Using from_config (rather than from_pretrained) here
        # avoids reading the checkpoint twice: load_checkpoint_and_dispatch
        # below is what actually loads the weights from disk.
        config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
        with init_empty_weights():
            model = AutoModelForCausalLM.from_config(
                config,
                torch_dtype=config.torch_dtype,  # dtype recorded in the checkpoint config
                trust_remote_code=True,
            )

        # Load the checkpoint shard by shard and split the layers across all
        # visible GPUs automatically. Some architectures also need
        # no_split_module_classes=[...] so a residual block is never split
        # across two devices.
        self.model = load_checkpoint_and_dispatch(
            model, checkpoint=model_dir, device_map="auto"
        )

    def __call__(self, data):
        prompt = data["inputs"]
        # Put input_ids/attention_mask on GPU 0: with device_map="auto" the
        # embedding layer sits on the first device, so inputs must start there.
        inputs = self.tokenizer(prompt, return_tensors="pt").to("cuda:0")
        out_ids = self.model.generate(
            **inputs,
            max_new_tokens=256,
        )
        return {
            "generated_text": self.tokenizer.decode(
                out_ids[0], skip_special_tokens=True
            )
        }
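

# --- Minimal local smoke test (a sketch, not part of the handler itself) ---
# In production, Inference Endpoints instantiates EndpointHandler and passes
# it the parsed JSON request body; the model directory and payload below are
# assumptions for illustration only.
if __name__ == "__main__":
    handler = EndpointHandler("path/to/model")  # hypothetical local checkpoint dir
    result = handler({"inputs": "Write a haiku about GPUs."})
    print(result["generated_text"])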