analist committed
Commit 95d3dff · verified · 1 Parent(s): ff0f4cf

Create handler.py

Files changed (1)
  1. handler.py +47 -0
handler.py ADDED
@@ -0,0 +1,47 @@
+ from unsloth import FastLanguageModel
+ from transformers import AutoTokenizer
+ import torch
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+         self.tokenizer = AutoTokenizer.from_pretrained(path, use_fast=True)
+         # Load the model in 4-bit; Unsloth places it on the device at load time.
+         self.model, _ = FastLanguageModel.from_pretrained(
+             model_name=path,
+             max_seq_length=2048,
+             dtype=torch.float16,
+             load_in_4bit=True,
+         )
+         # A 4-bit (bitsandbytes) model cannot be moved with .to(); it is already
+         # on the correct device, so enable Unsloth's inference mode instead.
+         FastLanguageModel.for_inference(self.model)
+         self.model.eval()
+
+     def __call__(self, data):
+         prompt = data.get("inputs", "")
+         if not prompt:
+             return {"error": "Missing 'inputs' in request payload."}
+
+         # Sampling parameters, overridable per request, with sensible defaults.
+         generation_params = {
+             "max_new_tokens": data.get("max_new_tokens", 128),
+             "temperature": data.get("temperature", 0.7),
+             "top_p": data.get("top_p", 0.9),
+             "top_k": data.get("top_k", 50),
+             "do_sample": data.get("do_sample", True),
+             "repetition_penalty": data.get("repetition_penalty", 1.1),
+         }
+
+         inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
+
+         with torch.no_grad():
+             outputs = self.model.generate(
+                 **inputs,
+                 pad_token_id=self.tokenizer.eos_token_id,  # silence missing-pad-token warnings
+                 **generation_params,
+             )
+
+         # outputs[0] holds the prompt tokens followed by the completion, so the
+         # returned text echoes the prompt.
+         generated_text = self.tokenizer.decode(
+             outputs[0],
+             skip_special_tokens=True,
+             clean_up_tokenization_spaces=True,
+         )
+
+         return {"generated_text": generated_text}
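
For a quick sanity check outside the deployed endpoint, the handler can be exercised directly. A minimal sketch, assuming the model weights live in the same repository as handler.py and a CUDA GPU is available for 4-bit loading (the path and prompt below are illustrative, not part of this commit):

    # Hypothetical local smoke test; not part of this commit.
    from handler import EndpointHandler

    handler = EndpointHandler(path=".")  # "." assumes weights sit next to handler.py
    response = handler({"inputs": "Write a haiku about GPUs.", "max_new_tokens": 64})
    print(response["generated_text"])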