from typing import Any, Dict

import torch
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"


class EndpointHandler:
    def __init__(self, path: str = ""):
        # The adapter config records which base model the PEFT weights were trained on
        config = PeftConfig.from_pretrained(path)
        base_model = AutoModelForCausalLM.from_pretrained(
            config.base_model_name_or_path,
            device_map="auto",  # optionally add load_in_8bit=True to reduce memory
        )
        self.tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
        # Attach the fine-tuned adapter weights stored at `path` to the base model
        self.model = PeftModel.from_pretrained(base_model, path)

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Args:
            data (Dict): The payload with the text prompt
                and optional generation parameters.
        """
        # Get inputs
        prompt = data.pop("inputs", None)
        parameters = data.pop("parameters", None)
        if prompt is None:
            raise ValueError("Missing prompt.")
        # Preprocess: tokenize the prompt and move it to the model's device
        input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(device)
        # Forward: honor caller-supplied generation parameters, falling back
        # to fixed sampling settings otherwise
        with torch.no_grad():
            if parameters is not None:
                output = self.model.generate(input_ids=input_ids, **parameters)
            else:
                output = self.model.generate(
                    input_ids=input_ids,
                    do_sample=True,  # temperature only takes effect when sampling
                    temperature=0.9,
                    max_new_tokens=50,
                )
        # Postprocess: decode the generated ids, dropping special tokens
        prediction = self.tokenizer.batch_decode(
            output.detach().cpu().numpy(), skip_special_tokens=True
        )[0]
        return {"generated_text": prediction}
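

# A minimal local smoke test sketch (assumption: the adapter files live in the
# current directory, and the prompt and parameters below are illustrative; a
# deployed endpoint would invoke EndpointHandler via the serving framework).
if __name__ == "__main__":
    handler = EndpointHandler(path=".")
    result = handler(
        {"inputs": "Explain what a PEFT adapter is.", "parameters": {"max_new_tokens": 40}}
    )
    print(result["generated_text"])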