ysharma HF staff commited on
Commit
2e05556
·
1 Parent(s): 13864f1

create app.py

Browse files
Files changed (1) hide show
  1. app.py +233 -0
app.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import json
3
+ import requests
4
+ import os
5
+ from text_generation import Client, InferenceAPIClient
6
+
7
# Streaming endpoint for OPENAI ChatGPT (server-sent events when "stream": True)
API_URL = "https://api.openai.com/v1/chat/completions"
# Streaming endpoint for OPENCHATKIT; exported via the environment so
# predict_together can read it back with os.getenv("API_URL_TGTHR")
os.environ['API_URL_TGTHR'] = 'https://openchat.ngrok.io'
11
+
12
# Predict function for CHATGPT
def predict_chatgpt(inputs, top_p_chatgpt, temperature_chatgpt, openai_api_key,
                    chat_counter_chatgpt, chatbot_chatgpt=None, history=None):
    """Stream a chat completion from the OpenAI ChatGPT API.

    Yields ``(chat, history, chat_counter_chatgpt)`` after every received token
    so the Gradio Chatbot widget renders incrementally.

    Args:
        inputs: latest user message.
        top_p_chatgpt: nucleus-sampling value from the UI slider.
        temperature_chatgpt: temperature value from the UI slider.
        openai_api_key: user-supplied OpenAI key, sent as a Bearer token.
        chat_counter_chatgpt: number of completed turns (0 on the first turn).
        chatbot_chatgpt: list of (user, assistant) pairs shown in the Chatbot.
        history: flat list alternating user/assistant utterances (Gradio state).
    """
    # BUG FIX: original used mutable default arguments ([]), which are shared
    # across calls; use None sentinels and create fresh lists per call.
    if chatbot_chatgpt is None:
        chatbot_chatgpt = []
    if history is None:
        history = []

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}",
    }

    # Handling the different roles for ChatGPT
    if chat_counter_chatgpt != 0:
        # Replay the whole visible conversation so the model has context.
        # BUG FIX: original iterated over undefined name `chatbot` (NameError
        # on every turn after the first); the parameter is `chatbot_chatgpt`.
        messages = []
        for user_turn, assistant_turn in chatbot_chatgpt:
            messages.append({"role": "user", "content": user_turn})
            messages.append({"role": "assistant", "content": assistant_turn})
        messages.append({"role": "user", "content": inputs})
    else:
        messages = [{"role": "user", "content": f"{inputs}"}]

    # BUG FIX: follow-up payload referenced undefined `temperature`/`top_p`
    # (the parameters are temperature_chatgpt/top_p_chatgpt), and the
    # first-turn payload hard-coded 1.0, ignoring the sliders. Both turns now
    # honor the UI values.
    payload = {
        "model": "gpt-3.5-turbo",
        "messages": messages,
        "temperature": temperature_chatgpt,
        "top_p": top_p_chatgpt,
        "n": 1,
        "stream": True,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }

    chat_counter_chatgpt += 1
    history.append(inputs)

    # POST with stream=True so we can iterate SSE lines as they arrive.
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)
    token_counter = 0
    partial_words = ""

    counter = 0
    for chunk in response.iter_lines():
        # Skipping the first chunk (stream preamble).
        if counter == 0:
            counter += 1
            continue
        # Skip empty keep-alive lines; response data arrives as bytes.
        if not chunk.decode():
            continue
        chunk = chunk.decode()
        # Lines look like 'data: {...}'. 'data: [DONE]' is exactly 12 chars,
        # so the length guard skips it before json.loads sees it.
        if len(chunk) > 12 and "delta" in json.loads(chunk[6:])['choices'][0]:
            delta = json.loads(chunk[6:])['choices'][0]["delta"]
            # ROBUSTNESS FIX: the first delta carries only {"role": ...} and
            # the final delta is empty — neither has a "content" key, which
            # crashed the original with KeyError.
            if "content" not in delta:
                continue
            partial_words = partial_words + delta["content"]
            if token_counter == 0:
                history.append(" " + partial_words)
            else:
                history[-1] = partial_words
            # Pair the flat history into (user, assistant) tuples for Chatbot.
            chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
            token_counter += 1
            yield chat, history, chat_counter_chatgpt  # resembles {chatbot: chat, state: history}
87
+
88
# Predict function for OPENCHATKIT
def predict_together(model: str,
                     inputs: str,
                     top_p: float,
                     temperature: float,
                     top_k: int,
                     repetition_penalty: float,
                     watermark: bool,
                     chatbot,
                     history,):
    """Stream a reply from the OpenChatKit text-generation endpoint.

    Rebuilds a "<human>: ... <bot>: ..." prompt from the prior turns, streams
    tokens from the remote server, and yields ``(chat, history)`` after every
    token so the Gradio Chatbot renders incrementally.

    Args:
        model: model id (currently unused; endpoint comes from the env).
        inputs: latest user message.
        top_p / temperature / top_k / repetition_penalty / watermark:
            generation parameters forwarded to the server.
        chatbot: list of (user, model) pairs shown in the Chatbot widget.
        history: flat list alternating user/model utterances (Gradio state).
    """
    client = Client(os.getenv("API_URL_TGTHR"))  # get_client(model)
    # debug
    print(f"^^client is - {client}")
    user_name, assistant_name = "<human>:", "<bot>:"  # get_usernames(model)

    history.append(inputs)

    # Re-tag every prior turn with the speaker markers the model expects.
    past = []
    for data in chatbot:
        user_data, model_data = data

        if not user_data.startswith(user_name):
            user_data = user_name + user_data
        if not model_data.startswith("\n\n" + assistant_name):
            model_data = "\n\n" + assistant_name + model_data

        past.append(user_data + model_data + "\n\n")

    if not inputs.startswith(user_name):
        inputs = user_name + inputs

    total_inputs = "".join(past) + inputs + "\n\n" + assistant_name
    # truncate total_inputs to the most recent 1000 characters
    total_inputs = total_inputs[-1000:]

    partial_words = ""

    for i, response in enumerate(client.generate_stream(
        total_inputs,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        watermark=watermark,
        temperature=temperature,
        max_new_tokens=500,
        stop_sequences=[user_name.rstrip(), assistant_name.rstrip()],
    )):
        if response.token.special:
            continue

        partial_words = partial_words + response.token.text
        # BUG FIX: the original used str.rstrip(stop_tag), which strips any
        # run of the tag's *characters* from the right (e.g. a trailing "n"
        # or ":" of legitimate text), not the exact suffix. removesuffix
        # drops only the literal stop tag.
        if partial_words.endswith(user_name.rstrip()):
            partial_words = partial_words.removesuffix(user_name.rstrip())
        if partial_words.endswith(assistant_name.rstrip()):
            partial_words = partial_words.removesuffix(assistant_name.rstrip())

        if i == 0:
            history.append(" " + partial_words)
        else:
            history[-1] = partial_words

        # Pair the flat history into (user, model) tuples for the Chatbot.
        chat = [
            (history[i].strip(), history[i + 1].strip()) for i in range(0, len(history) - 1, 2)
        ]
        yield chat, history
154
+
155
+
156
def reset_textbox():
    """Blank out the shared input textbox after a message is submitted."""
    cleared = gr.update(value="")
    return cleared
158
+
159
def reset_chat(chatbot, state):
    """Clear one chatbot pane and its history state.

    Returns ``(None, [])``; Gradio writes these back into the Chatbot
    component and its companion State, emptying both.
    """
    # Debug traces (kept identical to the original implementation).
    print(f"^^chatbot value is - {chatbot}")
    print(f"^^state value is - {state}")
    return None, []
164
+
165
+
166
# Page header rendered at the top of the demo via gr.HTML(title).
title = """<h1 align="center">🚀ChatGPT & 🔥OpenChatKit Comparison Gradio Demo</h1>"""
# Explanatory blurb rendered at the bottom of the page via gr.Markdown(description).
description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
```
User: <utterance>
Assistant: <utterance>
User: <utterance>
Assistant: <utterance>
...
```
In this app, you can explore the outputs of multiple LLMs when prompted in similar ways.
"""
177
+
178
# ---- UI layout and event wiring -----------------------------------------
# NOTE(review): indentation below is reconstructed from a diff view that lost
# the original whitespace — confirm the nesting against the running app.
with gr.Blocks(css="""#col_container {width: 1000px; margin-left: auto; margin-right: auto;}
                    #chatgpt {height: 520px; overflow: auto;}
                    #chattogether {height: 520px; overflow: auto;} """ ) as demo:
    #clear {width: 100px; height:50px; font-size:12px}""") as demo:
    gr.HTML(title)
    with gr.Row():
        # Left column: API key / input box, clear button, and the two chatbots.
        with gr.Column(scale=14):
            with gr.Box():
                with gr.Row():
                    with gr.Column(scale=13):
                        openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here for ChatGPT")
                        inputs = gr.Textbox(placeholder="Hi there!", label="Type an input and press Enter" )
                    with gr.Column(scale=1):
                        #b1 = gr.Button(elem_id = 'run')
                        b2 = gr.Button('Clear up Chatbots!', elem_id = 'clear').style(full_width=True)
                # Hidden per-session conversation state for each backend.
                state_chatgpt = gr.State([])
                state_together = gr.State([])

            with gr.Box():
                with gr.Row():
                    # Side-by-side chat panes, one per backend.
                    chatbot_chatgpt = gr.Chatbot(elem_id="chatgpt", label='ChatGPT API - OPENAI')
                    chatbot_together = gr.Chatbot(elem_id="chattogether", label='OpenChatKit - Text Generation')

        # Right column: sampling parameters for both backends.
        with gr.Column(scale=2, elem_id='parameters'):
            with gr.Box():
                gr.HTML("Parameters for #OpenCHAtToolKit")
                top_p = gr.Slider(minimum=-0, maximum=1.0,value=0.95, step=0.05,interactive=True, label="Top-p",)
                temperature = gr.Slider(minimum=-0, maximum=5.0, value=0.5, step=0.1, interactive=True, label="Temperature", )
                top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
                repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty",)
                watermark = gr.Checkbox(value=True, label="Text watermarking")
                # Hidden model selector; only its first choice is actually used
                # (fed to predict_together via temp_textbox_together below).
                model = gr.CheckboxGroup(value="Rallio67/joi2_20B_instruct_alpha",
                        choices=["togethercomputer/GPT-NeoXT-Chat-Base-20B", "Rallio67/joi2_20B_instruct_alpha", "google/flan-t5-xxl", "google/flan-ul2", "bigscience/bloomz", "EleutherAI/gpt-neox-20b",],
                        label="Model",visible=False,)
                temp_textbox_together = gr.Textbox(value=model.choices[0], visible=False)

            with gr.Box():
                gr.HTML("Parameters for OpenAI's ChatGPT")
                top_p_chatgpt = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p",)
                temperature_chatgpt = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
                # Hidden turn counter used by predict_chatgpt to decide whether
                # to replay conversation context.
                chat_counter_chatgpt = gr.Number(value=0, visible=False, precision=0)

    # Clear the textbox on submit (Gradio captures the submitted value first).
    inputs.submit(reset_textbox, [], [inputs])

    # Fan the same submit event out to both backends so they answer in parallel.
    inputs.submit( predict_chatgpt,
                  [inputs, top_p_chatgpt, temperature_chatgpt, openai_api_key, chat_counter_chatgpt, chatbot_chatgpt, state_chatgpt],
                  [chatbot_chatgpt, state_chatgpt, chat_counter_chatgpt],)
    inputs.submit( predict_together,
                  [temp_textbox_together, inputs, top_p, temperature, top_k, repetition_penalty, watermark, chatbot_together, state_together, ],
                  [chatbot_together, state_together],)

    # One clear button wipes both chat panes and their states.
    b2.click(reset_chat, [chatbot_chatgpt, state_chatgpt], [chatbot_chatgpt, state_chatgpt])
    b2.click(reset_chat, [chatbot_together, state_together], [chatbot_together, state_together])

    gr.Markdown(description)

demo.queue(concurrency_count=16).launch(height= 2500, debug=True)