Mattral committed on
Commit
ff96349
Β·
verified Β·
1 Parent(s): d9fa5e3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -164
app.py CHANGED
@@ -1,59 +1,44 @@
1
- import os
2
- from typing import Iterator
3
-
4
  import gradio as gr
 
5
 
6
- from src.model import run
7
-
8
- HF_PUBLIC = os.environ.get("HF_PUBLIC", False)
 
 
 
 
 
 
 
 
 
9
 
10
  DEFAULT_SYSTEM_PROMPT = "You are Phoenix AI Healthcare. You are professional, you are polite, give only truthful information and are based on the Mistral-7B model from Mistral AI about Healtcare and Wellness. You can communicate in different languages equally well."
 
11
  MAX_MAX_NEW_TOKENS = 4096
12
  DEFAULT_MAX_NEW_TOKENS = 256
13
  MAX_INPUT_TOKEN_LENGTH = 4000
14
 
15
  DESCRIPTION = """
16
- # [Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)
 
17
  """
18
 
19
-
20
  def clear_and_save_textbox(message: str) -> tuple[str, str]:
21
- """
22
- Clear the textbox and save the input to a state variable.
23
- :param message: The input message.
24
- :return: A tuple of the empty string and the input message.
25
- """
26
  return "", message
27
 
28
-
29
- def display_input(
30
- message: str, history: list[tuple[str, str]]
31
- ) -> list[tuple[str, str]]:
32
- """
33
- Display the input message in the chat history.
34
- :param message: The input message.
35
- :param history: The chat history.
36
- :return: The chat history with the input message appended.
37
- """
38
  history.append((message, ""))
39
  return history
40
 
41
-
42
- def delete_prev_fn(
43
- history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]:
44
- """
45
- Delete the previous message from the chat history.
46
- :param history: The chat history.
47
- :return: The chat history with the last message removed
48
- and the removed message.
49
- """
50
  try:
51
  message, _ = history.pop()
52
  except IndexError:
53
  message = ""
54
  return history, message or ""
55
 
56
-
57
  def generate(
58
  message: str,
59
  history_with_input: list[tuple[str, str]],
@@ -63,27 +48,11 @@ def generate(
63
  top_p: float,
64
  top_k: int,
65
  ) -> Iterator[list[tuple[str, str]]]:
66
- """
67
- Generate a response to the input message.
68
- :param message: The input message.
69
- :param history_with_input: The chat history with
70
- the input message appended.
71
- :param system_prompt: The system prompt.
72
- :param max_new_tokens: The maximum number of tokens to generate.
73
- :param temperature: The temperature.
74
- :param top_p: The top-p (nucleus sampling) probability.
75
- :param top_k: The top-k probability.
76
- :return: An iterator over the chat history with
77
- the generated response appended.
78
- """
79
  if max_new_tokens > MAX_MAX_NEW_TOKENS:
80
- raise ValueError
81
 
82
  history = history_with_input[:-1]
83
- generator = run(
84
- message, history,
85
- system_prompt, max_new_tokens, temperature, top_p, top_k
86
- )
87
  try:
88
  first_response = next(generator)
89
  yield history + [(message, first_response)]
@@ -92,60 +61,30 @@ def generate(
92
  for response in generator:
93
  yield history + [(message, response)]
94
 
95
-
96
- def process_example(message: str) -> tuple[str, list[tuple[str, str]]]:
97
- """
98
- Process an example.
99
- :param message: The input message.
100
- :return: A tuple of the empty string and the chat history with the \
101
- generated response appended.
102
- """
103
- generator = generate(message, [], DEFAULT_SYSTEM_PROMPT, 1024, 1, 0.95, 50)
104
- for x in generator:
105
- pass
106
- return "", x
107
-
108
-
109
- def check_input_token_length(
110
- message: str, chat_history: list[tuple[str, str]], system_prompt: str
111
- ) -> None:
112
- """
113
- Check that the accumulated input is not too long.
114
- :param message: The input message.
115
- :param chat_history: The chat history.
116
- :param system_prompt: The system prompt.
117
- :return: None.
118
- """
119
- input_token_length = len(message) + len(chat_history)
120
  if input_token_length > MAX_INPUT_TOKEN_LENGTH:
121
- raise gr.Error(
122
- f"The accumulated input is too long \
123
- ({input_token_length} > {MAX_INPUT_TOKEN_LENGTH}).\
124
- Clear your chat history and try again."
125
- )
126
-
127
 
128
- with gr.Blocks(css="./styles/style.css") as demo:
129
  gr.Markdown(DESCRIPTION)
130
- gr.DuplicateButton(
131
- value="Duplicate Space for private use", elem_id="duplicate-button"
132
- )
133
 
134
  with gr.Group():
135
- chatbot = gr.Chatbot(label="Playground")
136
  with gr.Row():
137
  textbox = gr.Textbox(
138
  container=False,
139
  show_label=False,
140
- placeholder="Greetings, with what Healthcare/Wellness topic can I help you with today?",
141
  scale=10,
142
  )
143
- submit_button = gr.Button("Submit", variant="primary",
144
- scale=1, min_width=0)
145
  with gr.Row():
146
- retry_button = gr.Button('πŸ”„ Retry', variant='secondary')
147
  undo_button = gr.Button('↩️ Undo', variant='secondary')
148
- clear_button = gr.Button('πŸ—‘οΈ Clear', variant='secondary')
149
 
150
  saved_input = gr.State()
151
 
@@ -189,117 +128,63 @@ with gr.Blocks(css="./styles/style.css") as demo:
189
  fn=clear_and_save_textbox,
190
  inputs=textbox,
191
  outputs=[textbox, saved_input],
192
- api_name=False,
193
- queue=False,
194
  ).then(
195
  fn=display_input,
196
  inputs=[saved_input, chatbot],
197
  outputs=chatbot,
198
- api_name=False,
199
- queue=False,
200
  ).then(
201
  fn=check_input_token_length,
202
  inputs=[saved_input, chatbot, system_prompt],
203
- api_name=False,
204
- queue=False,
205
  ).success(
206
  fn=generate,
207
- inputs=[
208
- saved_input,
209
- chatbot,
210
- system_prompt,
211
- max_new_tokens,
212
- temperature,
213
- top_p,
214
- top_k,
215
- ],
216
  outputs=chatbot,
217
- api_name=False,
218
  )
219
 
220
- button_event_preprocess = (
221
- submit_button.click(
222
- fn=clear_and_save_textbox,
223
- inputs=textbox,
224
- outputs=[textbox, saved_input],
225
- api_name=False,
226
- queue=False,
227
- )
228
- .then(
229
- fn=display_input,
230
- inputs=[saved_input, chatbot],
231
- outputs=chatbot,
232
- api_name=False,
233
- queue=False,
234
- )
235
- .then(
236
- fn=check_input_token_length,
237
- inputs=[saved_input, chatbot, system_prompt],
238
- api_name=False,
239
- queue=False,
240
- )
241
- .success(
242
- fn=generate,
243
- inputs=[
244
- saved_input,
245
- chatbot,
246
- system_prompt,
247
- max_new_tokens,
248
- temperature,
249
- top_p,
250
- top_k,
251
- ],
252
- outputs=chatbot,
253
- api_name=False,
254
- )
255
  )
256
 
257
  retry_button.click(
258
  fn=delete_prev_fn,
259
  inputs=chatbot,
260
  outputs=[chatbot, saved_input],
261
- api_name=False,
262
- queue=False,
263
  ).then(
264
  fn=display_input,
265
  inputs=[saved_input, chatbot],
266
  outputs=chatbot,
267
- api_name=False,
268
- queue=False,
269
  ).then(
270
  fn=generate,
271
- inputs=[
272
- saved_input,
273
- chatbot,
274
- system_prompt,
275
- max_new_tokens,
276
- temperature,
277
- top_p,
278
- top_k,
279
- ],
280
  outputs=chatbot,
281
- api_name=False,
282
  )
283
 
284
  undo_button.click(
285
  fn=delete_prev_fn,
286
  inputs=chatbot,
287
  outputs=[chatbot, saved_input],
288
- api_name=False,
289
- queue=False,
290
  ).then(
291
  fn=lambda x: x,
292
  inputs=[saved_input],
293
  outputs=textbox,
294
- api_name=False,
295
- queue=False,
296
  )
297
 
298
  clear_button.click(
299
  fn=lambda: ([], ""),
300
  outputs=[chatbot, saved_input],
301
- queue=False,
302
- api_name=False,
303
  )
304
 
305
- demo.queue(max_size=32).launch(share=HF_PUBLIC, show_api=False)
 
 
 
 
1
  import gradio as gr
2
+ from typing import Iterator, List, Tuple
3
 
4
# Stand-in for the real model backend so the UI can run without weights.
def mock_run(
    message: str,
    chat_history: List[Tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int,
    temperature: float,
    top_p: float,
    top_k: int,
) -> Iterator[str]:
    """Simulate a streaming model runner with a single canned reply.

    All generation parameters are accepted so the signature mirrors a
    real ``run`` API, but the mock ignores everything except *message*.

    :yields: exactly one response string echoing the user message.
    """
    yield f"Mock response to: {message}"
16
 
17
# Persona/instruction text sent to the model with every conversation.
# NOTE(review): "Healtcare" typo is preserved -- this string is runtime data.
DEFAULT_SYSTEM_PROMPT = "You are Phoenix AI Healthcare. You are professional, you are polite, give only truthful information and are based on the Mistral-7B model from Mistral AI about Healtcare and Wellness. You can communicate in different languages equally well."

# Hard ceiling on the `max_new_tokens` a caller may request (see generate()).
MAX_MAX_NEW_TOKENS = 4096
# Default generation budget per reply.
DEFAULT_MAX_NEW_TOKENS = 256
# Upper bound on accumulated input size; measured in characters, not true
# model tokens -- see check_input_token_length().
MAX_INPUT_TOKEN_LENGTH = 4000

# Markdown banner rendered at the top of the Gradio page.
DESCRIPTION = """
# Simple Healthcare Chatbot
### Powered by a mock model
"""
27
 
 
28
def clear_and_save_textbox(message: str) -> tuple[str, str]:
    """Blank the input textbox while handing its text off to state.

    :param message: the text the user typed.
    :returns: ``("", message)`` -- an empty string for the textbox plus
        the original text for the ``saved_input`` state component.
    """
    stashed = message
    return "", stashed
30
 
31
def display_input(message: str, history: list[tuple[str, str]]) -> list[tuple[str, str]]:
    """Show the pending user *message* in the chat window.

    Appends ``(message, "")`` to *history* in place; the empty second
    element is the bot slot that ``generate`` streams into later.

    :returns: the same (mutated) history list.
    """
    pending_turn = (message, "")
    history.append(pending_turn)
    return history
34
 
35
def delete_prev_fn(history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]:
    """Remove the newest (user, bot) pair from *history*.

    :param history: chat history; mutated in place when non-empty.
    :returns: the shortened history plus the removed user message, or
        ``""`` when the history was empty or the message was falsy.
    """
    if not history:
        return history, ""
    removed_user, _removed_bot = history.pop()
    return history, removed_user or ""
41
 
 
42
def generate(
    message: str,
    history_with_input: list[tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int,
    temperature: float,
    top_p: float,
    top_k: int,
) -> Iterator[list[tuple[str, str]]]:
    """Stream chat histories with the bot reply progressively filled in.

    :param message: newest user message (already appended to
        *history_with_input* by ``display_input``).
    :param history_with_input: chat history including the pending turn.
    :param system_prompt: system prompt forwarded to the model runner.
    :param max_new_tokens: generation budget; must not exceed
        ``MAX_MAX_NEW_TOKENS``.
    :param temperature: sampling temperature forwarded to the runner.
    :param top_p: nucleus-sampling probability forwarded to the runner.
    :param top_k: top-k cutoff forwarded to the runner.
    :yields: full history lists suitable for a gr.Chatbot update.
    :raises ValueError: when *max_new_tokens* is over the hard limit.
    """
    if max_new_tokens > MAX_MAX_NEW_TOKENS:
        # Include the offending values so the failure is actionable.
        raise ValueError(
            f"Max new tokens exceeded: {max_new_tokens} > {MAX_MAX_NEW_TOKENS}"
        )

    # Drop the pending (message, "") pair; it is re-added with each yield.
    history = history_with_input[:-1]
    generator = mock_run(
        message, history, system_prompt, max_new_tokens, temperature, top_p, top_k
    )
    try:
        first_response = next(generator)
        yield history + [(message, first_response)]
    except StopIteration:
        # Runner produced nothing at all -- still surface the user turn.
        yield history + [(message, "")]
    for response in generator:
        yield history + [(message, response)]
63
 
64
def check_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> None:
    """Guard against an over-long accumulated conversation.

    The "token" count is approximated by characters: the new *message*
    plus every prior user message. Bot replies and *system_prompt* are
    not counted -- NOTE(review): presumably intentional, confirm.

    :raises gr.Error: when the total exceeds ``MAX_INPUT_TOKEN_LENGTH``.
    """
    input_token_length = len(message)
    for user_msg, _bot_msg in chat_history:
        input_token_length += len(user_msg)
    if input_token_length > MAX_INPUT_TOKEN_LENGTH:
        raise gr.Error(f"The accumulated input is too long ({input_token_length} > {MAX_INPUT_TOKEN_LENGTH}). Clear your chat history and try again.")
 
 
 
 
 
68
 
69
with gr.Blocks() as demo:
    # Page banner.
    gr.Markdown(DESCRIPTION)
    # NOTE(review): plain Button with a duplicate-button id; it has no
    # click handler wired up, so it is decorative only -- confirm intent.
    gr.Button("Duplicate Space for private use", elem_id="duplicate-button")

    with gr.Group():
        chatbot = gr.Chatbot(label="Chat with Healthcare AI")
        with gr.Row():
            textbox = gr.Textbox(
                container=False,
                show_label=False,
                placeholder="Ask me anything about Healthcare and Wellness...",
                scale=10,
            )
            submit_button = gr.Button("Submit", variant="primary", scale=1, min_width=0)

    with gr.Row():
        retry_button = gr.Button('πŸ”„ Retry', variant='secondary')
        undo_button = gr.Button('↩️ Undo', variant='secondary')
        clear_button = gr.Button('πŸ—‘οΈ Clear', variant='secondary')

    # Holds the last submitted message between chained event callbacks.
    saved_input = gr.State()
90
 
 
128
  fn=clear_and_save_textbox,
129
  inputs=textbox,
130
  outputs=[textbox, saved_input],
 
 
131
  ).then(
132
  fn=display_input,
133
  inputs=[saved_input, chatbot],
134
  outputs=chatbot,
 
 
135
  ).then(
136
  fn=check_input_token_length,
137
  inputs=[saved_input, chatbot, system_prompt],
 
 
138
  ).success(
139
  fn=generate,
140
+ inputs=[saved_input, chatbot, system_prompt, max_new_tokens, temperature, top_p, top_k],
 
 
 
 
 
 
 
 
141
  outputs=chatbot,
 
142
  )
143
 
144
    # Submit-button pipeline: 1) clear the box and stash the message,
    # 2) echo it into the chat, 3) length-check, and only on success
    # 4) stream the model reply into the chatbot.
    submit_button.click(
        fn=clear_and_save_textbox,
        inputs=textbox,
        outputs=[textbox, saved_input],
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
    ).then(
        fn=check_input_token_length,
        inputs=[saved_input, chatbot, system_prompt],
    ).success(
        fn=generate,
        inputs=[saved_input, chatbot, system_prompt, max_new_tokens, temperature, top_p, top_k],
        outputs=chatbot,
    )
160
 
161
    # Retry: pop the last exchange, re-display the user message, regenerate.
    # NOTE(review): unlike the submit pipelines there is no length re-check
    # before generate -- confirm that is acceptable.
    retry_button.click(
        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
    ).then(
        fn=generate,
        inputs=[saved_input, chatbot, system_prompt, max_new_tokens, temperature, top_p, top_k],
        outputs=chatbot,
    )
174
 
175
    # Undo: remove the last exchange, then put the user text back into
    # the textbox so it can be edited and resubmitted.
    undo_button.click(
        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
    ).then(
        fn=lambda x: x,  # identity: copy saved message back to the textbox
        inputs=[saved_input],
        outputs=textbox,
    )
184
 
185
    # Clear: wipe both the visible chat and the saved-input state.
    clear_button.click(
        fn=lambda: ([], ""),
        outputs=[chatbot, saved_input],
    )

# Queue caps pending requests at 32; share=False keeps the app local-only.
demo.queue(max_size=32).launch(share=False)