YuvrajSingh9886 committed on
Commit 02ba82b · verified · 1 Parent(s): 4c291d2

Upload inference_sft.py

Files changed (1)
  1. inference_sft.py +113 -0
inference_sft.py ADDED
@@ -0,0 +1,113 @@
+ from config import ModelArgs
+ from model import Llama
+ import torch
+ import torch.nn.functional as F
+ from tokenizer import Tokenizer
+ import argparse
+
+
+ tokenizer = Tokenizer()
+ tokenizer = tokenizer.ready_tokenizer()
+
+
+ def remove_hashtag_lines(text):
+     """Removes lines that contain hashtags from the given text."""
+     lines = text.split("\n")
+     cleaned_lines = [line for line in lines if "#" not in line]
+     return "\n".join(cleaned_lines)
+
+
+ def remove_prefix(state_dict, prefix):
+     """Strips a prefix (e.g. the '_orig_mod.' added by torch.compile) from every state_dict key."""
+     new_state_dict = {}
+     for key, value in state_dict.items():
+         if key.startswith(prefix):
+             new_key = key[len(prefix):]  # Remove the prefix
+             new_state_dict[new_key] = value
+         else:
+             new_state_dict[key] = value
+     return new_state_dict
+
+
+ def topk_sampling(model, prompt, device, max_length=50, top_k=50, temperature=1.0, frequency_penalty=0.5):
+     input_ids = tokenizer.encode(prompt, return_tensors='pt').to(device)
+     token_frequencies = {}  # Track how often each token has appeared
+
+     for step in range(max_length):
+         with torch.no_grad():
+             outputs = model(input_ids)
+             logits = outputs[:, -1, :]  # Logits for the next token
+
+         logits = logits / temperature
+
+         # Apply the frequency penalty only after the first token has been generated
+         if step > 0:
+             for token in input_ids[0].tolist():
+                 token_frequencies[token] = token_frequencies.get(token, 0) + 1  # Count occurrences
+
+             # Modify logits after counting
+             for token, freq in token_frequencies.items():
+                 logits[0, token] -= frequency_penalty * (freq ** 0.8)  # Apply soft penalty
+
+         # Convert logits to probabilities
+         probs = F.softmax(logits, dim=-1)
+
+         # Keep only the top-k most likely tokens
+         top_k_probs, top_k_indices = torch.topk(probs, top_k, dim=-1)
+
+         # Sample from the top-k distribution
+         next_token = torch.multinomial(top_k_probs, num_samples=1)
+
+         # Map the sampled position back to its vocabulary id
+         xcol = torch.gather(top_k_indices, -1, next_token)
+
+         if xcol.item() == tokenizer.eos_token_id:
+             break  # Stop once the EOS token is generated
+         input_ids = torch.cat([input_ids, xcol], dim=1)
+
+     # Decode the full sequence (prompt plus generated tokens)
+     return tokenizer.decode(input_ids[0], skip_special_tokens=True)
+
+
+ def main():
+     # torch.set_float32_matmul_precision('high')
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--prompt", type=str, default='''Follow the given instructions carefully. My mom is about to retire from her 10 long years of service to a company. Write me a message saying how grateful we are for her service to our company.''')
+     parser.add_argument("--max_length", type=int, default=256)
+     parser.add_argument("--temperature", type=float, default=0.8)
+     parser.add_argument("--top_k", type=int, default=50)  # Used by topk_sampling below; default mirrors its signature
+     args = parser.parse_args()
+
+     model = Llama(device=ModelArgs.device, embeddings_dims=ModelArgs.embeddings_dims, no_of_decoder_layers=ModelArgs.no_of_decoder_layers, block_size=ModelArgs.block_size, vocab_size=ModelArgs.vocab_size, dropout=ModelArgs.dropout)
+     # model = torch.compile(model)
+     model = model.to(ModelArgs.device)
+
+     # Load the checkpoint onto the configured device and strip the torch.compile prefix
+     dict_model = torch.load('DPO_model_1650.pt', map_location=ModelArgs.device)
+     dict_model['MODEL_STATE'] = remove_prefix(dict_model['MODEL_STATE'], '_orig_mod.')
+     model.load_state_dict(dict_model['MODEL_STATE'])
+     model.eval()
+     print("Model ready")
+
+     with torch.no_grad():
+         generated_text = topk_sampling(model, args.prompt, max_length=args.max_length, top_k=args.top_k, temperature=args.temperature, device=ModelArgs.device)
+         # generated_text = remove_hashtag_lines(generated_text)
+         print("Generated: ", generated_text)
+
+
+ if __name__ == '__main__':
+     main()
+
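A minimal sketch of how the script might be run, assuming config.py, model.py, tokenizer.py, and the DPO_model_1650.pt checkpoint sit alongside inference_sft.py:

python inference_sft.py --prompt "Write a warm retirement thank-you message." --max_length 256 --temperature 0.8 --top_k 50

The flag values simply echo the script's own defaults; the prompt string is illustrative and can be replaced with any instruction.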