Cyleux committed on
Commit 672cd49 · 1 Parent(s): 8cea444

Upload chat_final.py

Files changed (1)
chat_final.py +142 -0
chat_final.py ADDED
@@ -0,0 +1,142 @@
+ print('Loading...')
+ from src.model_run import RWKV_RNN
+ import numpy as np
+ import os, copy, types, gc, sys
+ import torch
+ from src.utils import TOKENIZER
+
+ torch.backends.cudnn.benchmark = False
+ torch.backends.cudnn.allow_tf32 = False
+ torch.backends.cuda.matmul.allow_tf32 = False
+ np.set_printoptions(precision=4, suppress=True, linewidth=200)
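+ # cuDNN autotuning and TF32 matmuls are disabled for reproducible logits;
+ # with RUN_DEVICE = "cpu" below these flags are just belt and braces.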
+
+ WORD_NAME = ["20B_tokenizer.json", "20B_tokenizer.json"]
+ UNKNOWN_CHAR = None
+ tokenizer = TOKENIZER(WORD_NAME, UNKNOWN_CHAR=UNKNOWN_CHAR)
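+ # Both entries point at the same GPT-NeoX-style 20B_tokenizer.json; with two
+ # identical entries the reference src.utils.TOKENIZER takes its HuggingFace
+ # "tokenizers" code path (an assumption about utils, which is not in this commit).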
+
+ args = types.SimpleNamespace()
+ args.RUN_DEVICE = "cpu"
+ args.FLOAT_MODE = "fp32"
+ args.vocab_size = 50277
+ # args.MODEL_NAME = 'zrwkv-37fifth'
+ args.MODEL_NAME = 'zrwkv-23fifth'
+
+ args.n_layer = 12
+ args.n_embd = 768
+ args.ctx_len = 1024
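+ # 12 layers x 768 width matches RWKV's smallest Pile models (~169M params);
+ # these sizes and ctx_len must match whatever 'zrwkv-23fifth' was trained with.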
+
+ user = "User"
+ bot = "Daniel"
+ interface = ":"
+
+ os.environ["RWKV_RUN_DEVICE"] = args.RUN_DEVICE
+ MODEL_NAME = args.MODEL_NAME
+
+ print(f'loading... {MODEL_NAME}')
+ model = RWKV_RNN(args)
+
+ model_tokens = []
+ current_state = None
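+ # The whole conversation lives in these two globals: model_tokens is the full
+ # token history, current_state is the RWKV recurrent state that summarizes it.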
+
+ def run_rnn(tokens, newline_adj = 0):
+     global model_tokens, current_state
+     for i in range(len(tokens)):
+         model_tokens += [int(tokens[i])]
+         if i == len(tokens) - 1:
+             out, current_state = model.forward(model_tokens, current_state)  # last token: we need the logits
+         else:
+             current_state = model.forward(model_tokens, current_state, preprocess_only = True)  # just advance the state
+
+     out[0] = -999999999  # forbid token 0 ('<|endoftext|>' in the 20B vocab)
+     out[187] += newline_adj  # token 187 is '\n'; bias newlines up or down
+     return out
+
+ all_state = {}
+ def save_all_stat(name, last_out):
+     all_state[name] = {}
+     all_state[name]['out'] = last_out
+     all_state[name]['rnn'] = copy.deepcopy(current_state)
+     all_state[name]['token'] = copy.deepcopy(model_tokens)
+
+ def load_all_stat(name):
+     global model_tokens, current_state
+     current_state = copy.deepcopy(all_state[name]['rnn'])
+     model_tokens = copy.deepcopy(all_state[name]['token'])
+     return all_state[name]['out']
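+ # Each named snapshot stores the triple (logits, RNN state, token history),
+ # so the chat can be rewound to any saved point without re-running the model.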
+
+ print('\nRun prompt...')
+
+ out = ""
+ gc.collect()
+
+ save_all_stat('chat_init', out)
+ save_all_stat('chat', out) # ensure that 'chat' key is added to all_state
+
+ print(f'### prompt ###\n[{tokenizer.tokenizer.decode(model_tokens)}]\n')
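+ # No priming prompt is actually fed in this version: model_tokens is still
+ # empty, so the decode above prints [] and out stays a placeholder string
+ # until the first run_rnn call.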
+
+
+ def reply_msg_generator():  # note: this helper is never driven by the loop below
+     while True:
+         msg = yield
+         print(f'{bot}{interface} {msg}\n')
+
+ def on_message_generator():
+     global model_tokens, current_state
+     message = yield # This yield allows us to receive the initial message
+     while True:
+         msg = message.replace('\\n','\n').strip()
+         if len(msg) > 10000:
+             yield 'your message is too long (max 10000 characters)\n'
+             message = yield # wait for a replacement message instead of processing the long one
+             continue
+
+         out = load_all_stat('chat')
+         new = f"{user}{interface} {msg}\n{bot}{interface}"
+         out = run_rnn(tokenizer.tokenizer.encode(new), newline_adj=-999999999)  # forbid an immediate '\n' after the bot tag
+         save_all_stat('chat_pre', out)
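+         # 'chat_pre' captures the state just before generation; nothing below
+         # reloads it, but it is the natural hook for a "regenerate" feature.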
+
+         begin = len(model_tokens)
+         out_last = begin
+         yield f'{bot}{interface}' # Yield the bot's prompt immediately
+         for i in range(8000):
+             token = tokenizer.sample_logits(
+                 out,
+                 model_tokens,
+                 args.ctx_len,
+                 temperature=1.0,
+                 top_p_usual=0.85,
+                 top_p_newline=0.85,
+             )
+             out = run_rnn([token], newline_adj=1)  # slight bias toward '\n' so the reply can terminate
+
+             xxx = tokenizer.tokenizer.decode(model_tokens[out_last:])
+             if '\ufffd' not in xxx and 'user' not in str(xxx).lower() and '\n' not in xxx and str(xxx) != ':' and str(xxx) != '\n\n' and len(str(xxx)) > 0:
+                 yield xxx # Yield each part of the response as soon as it's ready
+                 out_last = begin + i + 1
+             else:
+                 out_last = begin + i + 1 # skip fragments ('\ufffd' = incomplete multi-byte char) without retrying them
+
+             send_msg = tokenizer.tokenizer.decode(model_tokens[begin:])
+             # Break once the reply contains a newline, a speaker tag, or an undecodable character
+             if '\ufffd' in send_msg or send_msg.endswith(f'{user}{interface}') or send_msg.endswith(f'{bot}{interface}') or '\n' in send_msg:
+                 send_msg = send_msg.strip()
+                 send_msg = send_msg.replace(f'{user}{interface}', '')
+                 send_msg = send_msg.replace(f'{bot}{interface}', '')
+                 send_msg = send_msg.replace('\n', '')
+                 break
+         save_all_stat('chat', out)
+         yield '\n' # Yield a newline at the end of the response
+         message = yield # Get the next message
+
+ print('Start chatting with Daniel!')
+
+ on_message_gen = on_message_generator()
+ next_message = next(on_message_gen) # Start the generator; runs up to its first yield
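+ # The driver below relies on the generator's protocol: it yields text chunks
+ # while a reply is streaming, and yields None when waiting for the next
+ # user message.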
+ while True:
+     if next_message is None: # If the generator is ready for a new message
+         msg = input(f'{user}{interface} ')
+         if len(msg.strip()) > 0:
+             next_message = on_message_gen.send(msg) # Send the message to the generator and receive the next yield
+         else:
+             print('Error: please say something')
+     else: # If the generator has yielded part of the response
+         print(next_message, end='', flush=True)
+         next_message = next(on_message_gen) # Get the next part of the response
+
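Likely usage, assuming the RWKV reference layout (src/model_run.py, src/utils.py, 20B_tokenizer.json, and the zrwkv-23fifth weights in the working directory): run python chat_final.py, then type messages at the User: prompt; Daniel's replies stream token by token.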