Tonic committed on
Commit 2ff4ae4 · 1 Parent(s): 8737ae5

Delete app.py

Files changed (1)
  1. app.py +0 -229
app.py DELETED
@@ -1,229 +0,0 @@
- from transformers import AutoModelForCausalLM, AutoTokenizer
- from tokenization_yi import YiTokenizer
- import torch
- import os
- import gradio as gr
- import sentencepiece
-
-
- DESCRIPTION = """
- # 👋🏻Welcome to 🙋🏻‍♂️Tonic's🧑🏻‍🚀YI-200K🚀
- You can use this Space to test out the current model [Tonic/YI](https://huggingface.co/01-ai/Yi-34B)
- You can also use 🧑🏻‍🚀YI-200K🚀 by cloning this Space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic1/YiTonic?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
- Join us: 🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️ community on 👻Discord: [Discord](https://discord.gg/nXx5wbX9) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [PolyGPT](https://github.com/tonic-ai/polygpt-alpha)
- """
-
- model_id = "01-ai/Yi-34B-200K"
-
- os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:120'
- device = "cuda" if torch.cuda.is_available() else "cpu"
-
- tokenizer = YiTokenizer(vocab_file="./tokenizer.model")
- offload_directory = "./offload"  # local folder for weights offloaded by device_map="auto"
- model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_8bit=True, offload_folder=offload_directory, trust_remote_code=True)
-
-
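- # Generation: encode the prompt, sample a continuation, and decode only the newly generated tokens.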
- def run(message, chat_history, max_new_tokens=4056, temperature=3.5, top_p=0.9, top_k=800):
-     prompt = get_prompt(message, chat_history)
-     input_ids = tokenizer.encode(prompt, return_tensors='pt')
-     input_ids = input_ids.to(model.device)
-     response_ids = model.generate(
-         input_ids,
-         max_length=max_new_tokens + input_ids.shape[1],
-         temperature=temperature,
-         top_p=top_p,
-         top_k=top_k,
-         pad_token_id=tokenizer.eos_token_id,
-         do_sample=True
-     )
-     response = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
-     return response
-
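- # Prompt construction: flatten (user, assistant) history pairs plus the new message into a single string.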
- def get_prompt(message, chat_history):
-     texts = []
-     do_strip = False
-     for user_input, response in chat_history:
-         user_input = user_input.strip() if do_strip else user_input
-         do_strip = True
-         texts.append(f" {response.strip()} {user_input} ")
-     message = message.strip() if do_strip else message
-     texts.append(f"{message}")
-     return ''.join(texts)
-
-
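- # Generation limits and small helper callbacks used by the UI event chains below.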
- MAX_MAX_NEW_TOKENS = 4056
- DEFAULT_MAX_NEW_TOKENS = 1256
- MAX_INPUT_TOKEN_LENGTH = 120000
-
- def clear_and_save_textbox(message):
-     return '', message
-
- def display_input(message, history=None):
-     history = history if history is not None else []
-     history.append((message, ''))
-     return history
-
- def delete_prev_fn(history=None):
-     history = history if history is not None else []
-     try:
-         message, _ = history.pop()
-     except IndexError:
-         message = ''
-     return history, message or ''
-
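- # generate() yields the updated chat history once; check_input_token_length() uses character and turn counts as a rough proxy for the token count.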
- def generate(message, history_with_input, max_new_tokens, temperature, top_p, top_k):
-     if int(max_new_tokens) > MAX_MAX_NEW_TOKENS:
-         raise ValueError(f'max_new_tokens must not exceed {MAX_MAX_NEW_TOKENS}')
-     history = history_with_input[:-1]
-     response = run(message, history, max_new_tokens, temperature, top_p, top_k)
-     yield history + [(message, response)]
-
-
- def process_example(message):
-     generator = generate(message, [], 1024, 2.5, 0.95, 900)
-     for x in generator:
-         pass
-     return '', x
-
- def check_input_token_length(message, chat_history):
-     input_token_length = len(message) + len(chat_history)
-     if input_token_length > MAX_INPUT_TOKEN_LENGTH:
-         raise gr.Error(f"The accumulated input is too long ({input_token_length} > {MAX_INPUT_TOKEN_LENGTH}). Clear your chat history and try again.")
-
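- # Gradio UI: chat window, sampling controls, and the event chains that wire them together.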
- with gr.Blocks(theme='ParityError/Anime') as demo:
-     gr.Markdown(DESCRIPTION)
-     with gr.Group():
-         chatbot = gr.Chatbot(label='TonicYi-34B-200K')
-         with gr.Row():
-             textbox = gr.Textbox(
-                 container=False,
-                 show_label=False,
-                 placeholder='As the dawn approached, they leant in and said',
-                 scale=10
-             )
-             submit_button = gr.Button('Submit', variant='primary', scale=1, min_width=0)
-
-     with gr.Row():
-         retry_button = gr.Button('Retry', variant='secondary')
-         undo_button = gr.Button('Undo', variant='secondary')
-         clear_button = gr.Button('Clear', variant='secondary')
-
-     saved_input = gr.State()
-
-     with gr.Accordion(label='Advanced options', open=False):
-         # system_prompt = gr.Textbox(label='System prompt', value=DEFAULT_SYSTEM_PROMPT, lines=5, interactive=False)
-         max_new_tokens = gr.Slider(label='Max New Tokens', minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
-         temperature = gr.Slider(label='Temperature', minimum=0.1, maximum=4.0, step=0.1, value=0.1)
-         top_p = gr.Slider(label='Top-P (nucleus sampling)', minimum=0.05, maximum=1.0, step=0.05, value=0.9)
-         top_k = gr.Slider(label='Top-K', minimum=1, maximum=1000, step=1, value=10)
-
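-     # Submitting via Enter: clear the textbox, echo the message, validate the input length, then generate.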
-     textbox.submit(
-         fn=clear_and_save_textbox,
-         inputs=textbox,
-         outputs=[textbox, saved_input],
-         api_name=False,
-         queue=False,
-     ).then(
-         fn=display_input,
-         inputs=[saved_input, chatbot],
-         outputs=chatbot,
-         api_name=False,
-         queue=False,
-     ).then(
-         fn=check_input_token_length,
-         inputs=[saved_input, chatbot],
-         api_name=False,
-         queue=False,
-     ).success(
-         fn=generate,
-         inputs=[
-             saved_input,
-             chatbot,
-             max_new_tokens,
-             temperature,
-             top_p,
-             top_k,
-         ],
-         outputs=chatbot,
-         api_name="Generate",
-     )
-
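-     # The Submit button runs the same preprocessing and generation pipeline as pressing Enter.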
-     button_event_preprocess = submit_button.click(
-         fn=clear_and_save_textbox,
-         inputs=textbox,
-         outputs=[textbox, saved_input],
-         api_name=False,
-         queue=False,
-     ).then(
-         fn=display_input,
-         inputs=[saved_input, chatbot],
-         outputs=chatbot,
-         api_name=False,
-         queue=False,
-     ).then(
-         fn=check_input_token_length,
-         inputs=[saved_input, chatbot],
-         api_name=False,
-         queue=False,
-     ).success(
-         fn=generate,
-         inputs=[
-             saved_input,
-             chatbot,
-             max_new_tokens,
-             temperature,
-             top_p,
-             top_k,
-         ],
-         outputs=chatbot,
-         api_name="Cgenerate",
-     )
-
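-     # Retry: drop the last exchange, re-display the message, and regenerate a response.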
-     retry_button.click(
-         fn=delete_prev_fn,
-         inputs=chatbot,
-         outputs=[chatbot, saved_input],
-         api_name=False,
-         queue=False,
-     ).then(
-         fn=display_input,
-         inputs=[saved_input, chatbot],
-         outputs=chatbot,
-         api_name=False,
-         queue=False,
-     ).then(
-         fn=generate,
-         inputs=[
-             saved_input,
-             chatbot,
-             max_new_tokens,
-             temperature,
-             top_p,
-             top_k,
-         ],
-         outputs=chatbot,
-         api_name=False,
-     )
-
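-     # Undo: remove the last exchange and restore the message to the textbox.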
-     undo_button.click(
-         fn=delete_prev_fn,
-         inputs=chatbot,
-         outputs=[chatbot, saved_input],
-         api_name=False,
-         queue=False,
-     ).then(
-         fn=lambda x: x,
-         inputs=[saved_input],
-         outputs=textbox,
-         api_name=False,
-         queue=False,
-     )
-
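-     # Clear: reset the chat history and the saved input.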
-     clear_button.click(
-         fn=lambda: ([], ''),
-         outputs=[chatbot, saved_input],
-         queue=False,
-         api_name=False,
-     )
-
- demo.queue().launch(show_api=True)