Nymbo committed on
Commit
81fd32e
·
verified ·
1 Parent(s): a8fc89d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +133 -133
app.py CHANGED
@@ -99,6 +99,139 @@ def respond(
99
  # GRADIO UI CONFIGURATION
100
  # -------------------------
101
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
  # Create a Chatbot component with a specified height
103
  chatbot = gr.Chatbot(height=600) # Define the height of the chatbot interface
104
  print("Chatbot interface created.")
@@ -242,139 +375,6 @@ with demo:
242
  )
243
  print("Featured model radio button change event linked.")
244
 
245
- # -----------
246
- # ADDING THE "INFORMATION" TAB
247
- # -----------
248
- with gr.Tab("Information"):
249
- with gr.Row():
250
- # Accordion for Featured Models
251
- with gr.Accordion("Featured Models", open=False):
252
- gr.HTML(
253
- """
254
- <table style="width:100%; text-align:center; margin:auto;">
255
- <tr>
256
- <th>Model Name</th>
257
- <th>Typography</th>
258
- <th>Notes</th>
259
- </tr>
260
- <tr>
261
- <td>meta-llama/Llama-3.3-70B-Instruct</td>
262
- <td>✅</td>
263
- <td></td>
264
- </tr>
265
- <tr>
266
- <td>meta-llama/Llama-3.2-3B-Instruct</td>
267
- <td>✅</td>
268
- <td></td>
269
- </tr>
270
- <tr>
271
- <td>meta-llama/Llama-3.2-1B-Instruct</td>
272
- <td>✅</td>
273
- <td></td>
274
- </tr>
275
- <tr>
276
- <td>meta-llama/Llama-3.1-8B-Instruct</td>
277
- <td>✅</td>
278
- <td></td>
279
- </tr>
280
- <tr>
281
- <td>NousResearch/Hermes-3-Llama-3.1-8B</td>
282
- <td>✅</td>
283
- <td></td>
284
- </tr>
285
- <tr>
286
- <td>google/gemma-2-27b-it</td>
287
- <td>✅</td>
288
- <td></td>
289
- </tr>
290
- <tr>
291
- <td>google/gemma-2-9b-it</td>
292
- <td>✅</td>
293
- <td></td>
294
- </tr>
295
- <tr>
296
- <td>google/gemma-2-2b-it</td>
297
- <td>✅</td>
298
- <td></td>
299
- </tr>
300
- <tr>
301
- <td>mistralai/Mistral-Nemo-Instruct-2407</td>
302
- <td>✅</td>
303
- <td></td>
304
- </tr>
305
- <tr>
306
- <td>mistralai/Mixtral-8x7B-Instruct-v0.1</td>
307
- <td>✅</td>
308
- <td></td>
309
- </tr>
310
- <tr>
311
- <td>mistralai/Mistral-7B-Instruct-v0.3</td>
312
- <td>✅</td>
313
- <td></td>
314
- </tr>
315
- <tr>
316
- <td>Qwen/Qwen2.5-72B-Instruct</td>
317
- <td>✅</td>
318
- <td></td>
319
- </tr>
320
- <tr>
321
- <td>Qwen/QwQ-32B-Preview</td>
322
- <td>✅</td>
323
- <td></td>
324
- </tr>
325
- <tr>
326
- <td>PowerInfer/SmallThinker-3B-Preview</td>
327
- <td>✅</td>
328
- <td></td>
329
- </tr>
330
- <tr>
331
- <td>HuggingFaceTB/SmolLM2-1.7B-Instruct</td>
332
- <td>✅</td>
333
- <td></td>
334
- </tr>
335
- <tr>
336
- <td>TinyLlama/TinyLlama-1.1B-Chat-v1.0</td>
337
- <td>✅</td>
338
- <td></td>
339
- </tr>
340
- <tr>
341
- <td>microsoft/Phi-3.5-mini-instruct</td>
342
- <td>✅</td>
343
- <td></td>
344
- </tr>
345
- </table>
346
- """
347
- )
348
-
349
- # Accordion for Parameters Overview
350
- with gr.Accordion("Parameters Overview", open=False):
351
- gr.Markdown(
352
- """
353
- ## System Message
354
- ###### This box is for setting the initial context or instructions for the AI. It helps guide the AI on how to respond to your inputs.
355
-
356
- ## Max New Tokens
357
- ###### This slider allows you to specify the maximum number of tokens (words or parts of words) the AI can generate in a single response. The default value is 512, and the maximum is 4096.
358
-
359
- ## Temperature
360
- ###### Temperature controls the randomness of the AI's responses. A higher temperature makes the responses more creative and varied, while a lower temperature makes them more predictable and focused. The default value is 0.7.
361
-
362
- ## Top-P (Nucleus Sampling)
363
- ###### Top-P sampling is another way to control the diversity of the AI's responses. It ensures that the AI only considers the most likely tokens up to a cumulative probability of P. The default value is 0.95.
364
-
365
- ## Frequency Penalty
366
- ###### This penalty discourages the AI from repeating the same tokens (words or phrases) in its responses. A higher penalty reduces repetition. The default value is 0.0.
367
-
368
- ## Seed
369
- ###### The seed is a number that ensures the reproducibility of the AI's responses. If you set a specific seed, the AI will generate the same response every time for the same input. If you set it to -1, the AI will generate a random seed each time.
370
-
371
- ## Custom Model
372
- ###### You can specify a custom Hugging Face model path here. This will override any selected featured model. This is optional and allows you to use models not listed in the featured models.
373
-
374
- ### Remember, these settings are all about giving you control over the text generation process. Feel free to experiment and see what each one does. And if you're ever in doubt, the default settings are a great place to start. Happy creating!
375
- """
376
- )
377
-
378
  print("Gradio interface initialized.")
379
 
380
  if __name__ == "__main__":
 
99
  # GRADIO UI CONFIGURATION
100
  # -------------------------
101
 
102
+ # -----------
103
+ # ADDING THE "INFORMATION" TAB
104
+ # -----------
105
+ with gr.Tab("Information"):
106
+ with gr.Row():
107
+ # Accordion for Featured Models
108
+ with gr.Accordion("Featured Models", open=False):
109
+ gr.HTML(
110
+ """
111
+ <table style="width:100%; text-align:center; margin:auto;">
112
+ <tr>
113
+ <th>Model Name</th>
114
+ <th>Typography</th>
115
+ <th>Notes</th>
116
+ </tr>
117
+ <tr>
118
+ <td>meta-llama/Llama-3.3-70B-Instruct</td>
119
+ <td>✅</td>
120
+ <td></td>
121
+ </tr>
122
+ <tr>
123
+ <td>meta-llama/Llama-3.2-3B-Instruct</td>
124
+ <td>✅</td>
125
+ <td></td>
126
+ </tr>
127
+ <tr>
128
+ <td>meta-llama/Llama-3.2-1B-Instruct</td>
129
+ <td>✅</td>
130
+ <td></td>
131
+ </tr>
132
+ <tr>
133
+ <td>meta-llama/Llama-3.1-8B-Instruct</td>
134
+ <td>✅</td>
135
+ <td></td>
136
+ </tr>
137
+ <tr>
138
+ <td>NousResearch/Hermes-3-Llama-3.1-8B</td>
139
+ <td>✅</td>
140
+ <td></td>
141
+ </tr>
142
+ <tr>
143
+ <td>google/gemma-2-27b-it</td>
144
+ <td>✅</td>
145
+ <td></td>
146
+ </tr>
147
+ <tr>
148
+ <td>google/gemma-2-9b-it</td>
149
+ <td>✅</td>
150
+ <td></td>
151
+ </tr>
152
+ <tr>
153
+ <td>google/gemma-2-2b-it</td>
154
+ <td>✅</td>
155
+ <td></td>
156
+ </tr>
157
+ <tr>
158
+ <td>mistralai/Mistral-Nemo-Instruct-2407</td>
159
+ <td>✅</td>
160
+ <td></td>
161
+ </tr>
162
+ <tr>
163
+ <td>mistralai/Mixtral-8x7B-Instruct-v0.1</td>
164
+ <td>✅</td>
165
+ <td></td>
166
+ </tr>
167
+ <tr>
168
+ <td>mistralai/Mistral-7B-Instruct-v0.3</td>
169
+ <td>✅</td>
170
+ <td></td>
171
+ </tr>
172
+ <tr>
173
+ <td>Qwen/Qwen2.5-72B-Instruct</td>
174
+ <td>✅</td>
175
+ <td></td>
176
+ </tr>
177
+ <tr>
178
+ <td>Qwen/QwQ-32B-Preview</td>
179
+ <td>✅</td>
180
+ <td></td>
181
+ </tr>
182
+ <tr>
183
+ <td>PowerInfer/SmallThinker-3B-Preview</td>
184
+ <td>✅</td>
185
+ <td></td>
186
+ </tr>
187
+ <tr>
188
+ <td>HuggingFaceTB/SmolLM2-1.7B-Instruct</td>
189
+ <td>✅</td>
190
+ <td></td>
191
+ </tr>
192
+ <tr>
193
+ <td>TinyLlama/TinyLlama-1.1B-Chat-v1.0</td>
194
+ <td>✅</td>
195
+ <td></td>
196
+ </tr>
197
+ <tr>
198
+ <td>microsoft/Phi-3.5-mini-instruct</td>
199
+ <td>✅</td>
200
+ <td></td>
201
+ </tr>
202
+ </table>
203
+ """
204
+ )
205
+
206
+ # Accordion for Parameters Overview
207
+ with gr.Accordion("Parameters Overview", open=False):
208
+ gr.Markdown(
209
+ """
210
+ ## System Message
211
+ ###### This box is for setting the initial context or instructions for the AI. It helps guide the AI on how to respond to your inputs.
212
+
213
+ ## Max New Tokens
214
+ ###### This slider allows you to specify the maximum number of tokens (words or parts of words) the AI can generate in a single response. The default value is 512, and the maximum is 4096.
215
+
216
+ ## Temperature
217
+ ###### Temperature controls the randomness of the AI's responses. A higher temperature makes the responses more creative and varied, while a lower temperature makes them more predictable and focused. The default value is 0.7.
218
+
219
+ ## Top-P (Nucleus Sampling)
220
+ ###### Top-P sampling is another way to control the diversity of the AI's responses. It ensures that the AI only considers the most likely tokens up to a cumulative probability of P. The default value is 0.95.
221
+
222
+ ## Frequency Penalty
223
+ ###### This penalty discourages the AI from repeating the same tokens (words or phrases) in its responses. A higher penalty reduces repetition. The default value is 0.0.
224
+
225
+ ## Seed
226
+ ###### The seed is a number that ensures the reproducibility of the AI's responses. If you set a specific seed, the AI will generate the same response every time for the same input. If you set it to -1, the AI will generate a random seed each time.
227
+
228
+ ## Custom Model
229
+ ###### You can specify a custom Hugging Face model path here. This will override any selected featured model. This is optional and allows you to use models not listed in the featured models.
230
+
231
+ ### Remember, these settings are all about giving you control over the text generation process. Feel free to experiment and see what each one does. And if you're ever in doubt, the default settings are a great place to start. Happy creating!
232
+ """
233
+ )
234
+
235
  # Create a Chatbot component with a specified height
236
  chatbot = gr.Chatbot(height=600) # Define the height of the chatbot interface
237
  print("Chatbot interface created.")
 
375
  )
376
  print("Featured model radio button change event linked.")
377
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
378
  print("Gradio interface initialized.")
379
 
380
  if __name__ == "__main__":