Update app.py
Browse files
app.py
CHANGED
@@ -1,21 +1,7 @@
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
3 |
-
import pandas as pd
|
4 |
|
5 |
-
#
|
6 |
-
image_url = "https://drive.google.com/uc?export=view&id=1AB7sFKxPLkJE_RmyUDap6fFaDlu1XGJl"
|
7 |
-
|
8 |
-
# Define the system message
|
9 |
-
system_message = """
|
10 |
-
You are a Career Counseling Chatbot. Analyze the student's academic performance and extracurricular activities to provide career guidance. Based on the provided data, respond in the following format and must include the following headings:
|
11 |
-
# **Student's Primary Interest with Reason**
|
12 |
-
# **Career Opportunities in the field**
|
13 |
-
# **Universities in Pakistan for related field**
|
14 |
-
# **Conclusion with name of field**
|
15 |
-
Ensure that the analysis is based on the student's performance in subjects and extracurriculars, and suggest relevant career options with details on possible high ranking universities in Pakistan.
|
16 |
-
"""
|
17 |
-
|
18 |
-
# CSS to hide footer, customize button, and center image
|
19 |
css = """
|
20 |
footer {display:none !important}
|
21 |
.output-markdown{display:none !important}
|
@@ -80,25 +66,10 @@ footer {display:none !important}
|
|
80 |
--tw-text-opacity: 1 !important;
|
81 |
color:rgb(37 56 133 / var(--tw-text-opacity)) !important;
|
82 |
}
|
83 |
-
#image-container {
|
84 |
-
display: flex;
|
85 |
-
justify-content: center;
|
86 |
-
align-items: center;
|
87 |
-
height: auto; /* Adjust the height as needed */
|
88 |
-
margin-top: 20px; /* Adjust the margin as needed */
|
89 |
-
}
|
90 |
-
#compass-image {
|
91 |
-
max-width: 800px; /* Adjust the width as needed */
|
92 |
-
max-height: 600px; /* Adjust the height as needed */
|
93 |
-
object-fit: contain; /* Maintains aspect ratio */
|
94 |
-
}
|
95 |
"""
|
96 |
|
97 |
# Initialize the InferenceClient for chatbot
|
98 |
-
client = InferenceClient("HuggingFaceH4/zephyr-7b-
|
99 |
-
|
100 |
-
# Global variable to store chat history for the current session
|
101 |
-
current_chat_history = []
|
102 |
|
103 |
# Define the function for chatbot response
|
104 |
def respond(
|
@@ -109,8 +80,6 @@ def respond(
|
|
109 |
temperature,
|
110 |
top_p,
|
111 |
):
|
112 |
-
global current_chat_history
|
113 |
-
|
114 |
messages = [{"role": "system", "content": system_message}]
|
115 |
|
116 |
for val in history:
|
@@ -118,11 +87,9 @@ def respond(
|
|
118 |
messages.append({"role": "user", "content": val[0]})
|
119 |
if val[1]:
|
120 |
messages.append({"role": "assistant", "content": val[1]})
|
121 |
-
|
122 |
-
|
123 |
messages.append({"role": "user", "content": message})
|
124 |
-
|
125 |
-
|
126 |
response = ""
|
127 |
|
128 |
for message in client.chat_completion(
|
@@ -136,23 +103,6 @@ def respond(
|
|
136 |
response += token
|
137 |
yield response
|
138 |
|
139 |
-
# Append the assistant's final response to the history
|
140 |
-
current_chat_history.append(f"Assistant: {response}")
|
141 |
-
|
142 |
-
def download_chat_history():
|
143 |
-
# Join the current chat history into a single string
|
144 |
-
history_str = "\n".join(current_chat_history)
|
145 |
-
# Save the chat history to a text file
|
146 |
-
with open("chat_history.txt", "w") as f:
|
147 |
-
f.write(history_str)
|
148 |
-
return "chat_history.txt"
|
149 |
-
|
150 |
-
def clear_chat_history():
|
151 |
-
# Reset the current chat history
|
152 |
-
global current_chat_history
|
153 |
-
current_chat_history.clear() # Clear the chat history
|
154 |
-
return "Chat history cleared."
|
155 |
-
|
156 |
def send_message(message, history, system_message, max_tokens, temperature, top_p):
|
157 |
if message:
|
158 |
history.append((message, ""))
|
@@ -167,86 +117,78 @@ def send_message(message, history, system_message, max_tokens, temperature, top_
|
|
167 |
response_text = ""
|
168 |
for r in response:
|
169 |
response_text = r
|
170 |
-
|
171 |
-
formatted_response_text = response_text.replace(
|
172 |
-
"Student's Primary Interest with Reason:", "<h2><strong>Student's Primary Interest with Reason</strong></h2>"
|
173 |
-
).replace(
|
174 |
-
"Career Opportunities in the field:", "<h2><strong>Career Opportunities in the field</strong></h2>"
|
175 |
-
).replace(
|
176 |
-
"Universities in Pakistan for related field:", "<h2><strong>Universities in Pakistan for related field</strong></h2>"
|
177 |
-
).replace(
|
178 |
-
"Conclusion with name of field:", "<h2><strong>Conclusion with name of field</strong></h2>"
|
179 |
-
)
|
180 |
-
history[-1] = (message, formatted_response_text)
|
181 |
return history, gr.update(value="")
|
182 |
|
183 |
-
#
|
184 |
-
|
185 |
-
|
186 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
187 |
|
188 |
-
#
|
189 |
with gr.Blocks(css=css) as demo:
|
190 |
-
|
191 |
-
|
192 |
-
|
193 |
-
gr.Image(image_url, elem_id="compass-image")
|
194 |
|
195 |
-
gr.
|
196 |
-
gr.
|
197 |
-
gr.
|
198 |
-
|
199 |
-
- **Personalized Analysis:** Delivers career advice tailored to individual student profiles.
|
200 |
-
- **Streamlined Interface:** Simple and intuitive user experience.
|
201 |
-
- **Detailed Reports:** Offers insights into suitable career paths, relevant universities, and job opportunities.
|
202 |
-
- **Aptitude Test:** Take the Aptitude Test to determine your interest and find out the relevent field.
|
203 |
-
**Libraries Used:**
|
204 |
-
- **Gradio:** For creating the user interface.
|
205 |
-
- **Pandas:** For reading and analyzing Excel files.
|
206 |
-
- **Hugging API and LLM:** Zephyr-7b-beta For utilizing state-of-the-art language models.
|
207 |
|
208 |
-
|
209 |
-
|
210 |
-
|
211 |
-
|
212 |
-
3. Get detailed recommendations and potential career paths!
|
213 |
-
- **Aptitude test**
|
214 |
-
1. Or choose to take "Aptitude test"
|
215 |
-
2. Click on "generate an aptitude test for me (10 questions)"
|
216 |
-
3. After that, 10 questions would appear.
|
217 |
-
4. Answer those questions and submit the response.
|
218 |
-
5. Get the AI analyzed answer and recommendations and potential career paths!
|
219 |
-
""")
|
220 |
-
|
221 |
-
# Detailed Analysis Tab
|
222 |
-
with gr.Tab("Detailed Analysis"):
|
223 |
-
gr.Markdown("# Detailed Analysis")
|
224 |
-
gr.Markdown("Get personalized career guidance based on academic performance and extracurricular activities with Detailed Analysis.\n<div style='color: green;'>Developed by Hashir Ehtisham</div>")
|
225 |
-
|
226 |
-
system_message_career = gr.Textbox(value=system_message, visible=False)
|
227 |
-
chatbot_career = gr.Chatbot()
|
228 |
-
msg_career = gr.Textbox(label="Enter the Excel Copied Data here")
|
229 |
-
|
230 |
-
with gr.Row():
|
231 |
-
clear_career = gr.Button("New Chat")
|
232 |
-
download_button = gr.Button("Download Chat History")
|
233 |
-
submit_career = gr.Button("Submit")
|
234 |
|
235 |
-
|
236 |
-
|
237 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
238 |
|
239 |
-
|
240 |
-
|
241 |
-
|
242 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
243 |
|
244 |
with gr.Accordion("Additional Inputs", open=False):
|
245 |
-
|
246 |
-
|
247 |
-
|
248 |
|
249 |
-
def
|
250 |
chat_history, _ = send_message(
|
251 |
message=message,
|
252 |
history=chat_history,
|
@@ -255,48 +197,26 @@ with gr.Blocks(css=css) as demo:
|
|
255 |
temperature=temperature_val,
|
256 |
top_p=top_p_val,
|
257 |
)
|
258 |
-
return gr.update(value=
|
259 |
|
260 |
-
|
261 |
-
|
262 |
-
inputs=[msg_career, chatbot_career, system_message_career, max_tokens_career, temperature_career, top_p_career],
|
263 |
-
outputs=[chatbot_career, msg_career],
|
264 |
-
)
|
265 |
-
|
266 |
-
clear_career.click(lambda: None, None, chatbot_career, queue=False)
|
267 |
-
|
268 |
-
# File Upload Tab
|
269 |
-
with gr.Tab("Upload Data"):
|
270 |
-
gr.Markdown("# Upload Data")
|
271 |
-
file_input = gr.File(label="Upload Excel file")
|
272 |
-
excel_output = gr.Textbox(label="Excel Content")
|
273 |
-
file_input.change(read_excel, inputs=file_input, outputs=excel_output)
|
274 |
|
275 |
-
|
276 |
-
|
277 |
-
gr.Markdown(
|
278 |
-
|
279 |
-
|
280 |
-
|
281 |
-
|
282 |
-
|
283 |
-
|
284 |
-
|
285 |
-
system_message_aptitude = gr.Textbox(value="You are an Aptitude Test Chatbot", visible=False)
|
286 |
-
chatbot_aptitude = gr.Chatbot()
|
287 |
-
msg_aptitude = gr.Textbox(label="Your message")
|
288 |
-
|
289 |
-
with gr.Row():
|
290 |
-
clear_aptitude = gr.Button("Clear")
|
291 |
-
submit_aptitude = gr.Button("Submit")
|
292 |
-
example_button = gr.Button("Generate Aptitude Test (10 questions)")
|
293 |
-
|
294 |
with gr.Accordion("Additional Inputs", open=False):
|
295 |
-
|
296 |
-
|
297 |
-
|
298 |
|
299 |
-
def
|
300 |
chat_history, _ = send_message(
|
301 |
message=message,
|
302 |
history=chat_history,
|
@@ -305,62 +225,36 @@ with gr.Blocks(css=css) as demo:
|
|
305 |
temperature=temperature_val,
|
306 |
top_p=top_p_val,
|
307 |
)
|
308 |
-
return chat_history
|
309 |
|
310 |
-
|
311 |
-
|
312 |
-
|
313 |
-
|
314 |
-
|
315 |
-
|
316 |
-
max_tokens_aptitude,
|
317 |
-
temperature_aptitude,
|
318 |
-
top_p_aptitude,
|
319 |
-
],
|
320 |
-
outputs=[chatbot_aptitude],
|
321 |
-
)
|
322 |
-
|
323 |
-
clear_aptitude.click(lambda: None, None, chatbot_aptitude)
|
324 |
-
|
325 |
-
def copy_to_message():
|
326 |
-
return gr.update(value="Create a 10-question aptitude test designed to assess a student's interests in various academic fields such as Engineering, Medicine, Computer Science, Law, Finance and Arts. Each question should be multiple-choice with six options, and should ask the student about their preferences, interests, or inclinations towards activities, subjects, or scenarios related to these fields. Avoid questions that require prior knowledge in any specific subject. The test should be suitable for high school students exploring potential career paths.")
|
327 |
-
submit_aptitude.click(
|
328 |
-
respond_wrapper_aptitude,
|
329 |
-
inputs=[
|
330 |
-
msg_aptitude,
|
331 |
-
chatbot_aptitude,
|
332 |
-
system_message_aptitude,
|
333 |
-
max_tokens_aptitude,
|
334 |
-
temperature_aptitude,
|
335 |
-
top_p_aptitude,
|
336 |
-
],
|
337 |
-
outputs=[chatbot_aptitude],
|
338 |
-
)
|
339 |
-
|
340 |
-
example_button.click(copy_to_message, [], [msg_aptitude])
|
341 |
-
|
342 |
-
# Simple Chatbot Tab (new tab integration)
|
343 |
-
with gr.Tab("General Guidance & Emotional Support"):
|
344 |
-
gr.Markdown("# General Guidance & Emotional Support")
|
345 |
-
gr.Markdown("""
|
346 |
-
A compassionate career counseling chatbot providing personalized guidance on career paths and emotional support for your journey.
|
347 |
-
<div style='color: green;'>Developed by Hashir Ehtisham</div>
|
348 |
-
""")
|
349 |
-
|
350 |
-
system_message_simple = gr.Textbox(value="You are an AI powered chatbot named as Career Compass built by Hashir Ehtisham who is a student of APS DHA II Sec -D to help students, teachers, and parents find the best career paths based on students' interests and academic performance.", visible=False)
|
351 |
-
chatbot_simple = gr.Chatbot()
|
352 |
-
msg_simple = gr.Textbox(label="Type a message")
|
353 |
|
354 |
-
|
355 |
-
|
356 |
-
|
|
|
357 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
358 |
with gr.Accordion("Additional Inputs", open=False):
|
359 |
-
|
360 |
-
|
361 |
-
|
362 |
-
|
363 |
-
def
|
364 |
chat_history, _ = send_message(
|
365 |
message=message,
|
366 |
history=chat_history,
|
@@ -369,22 +263,10 @@ with gr.Blocks(css=css) as demo:
|
|
369 |
temperature=temperature_val,
|
370 |
top_p=top_p_val,
|
371 |
)
|
372 |
-
return chat_history
|
373 |
-
|
374 |
-
submit_simple.click(
|
375 |
-
respond_wrapper_simple,
|
376 |
-
inputs=[
|
377 |
-
msg_simple,
|
378 |
-
chatbot_simple,
|
379 |
-
system_message_simple,
|
380 |
-
max_tokens_simple,
|
381 |
-
temperature_simple,
|
382 |
-
top_p_simple,
|
383 |
-
],
|
384 |
-
outputs=[chatbot_simple],
|
385 |
-
)
|
386 |
|
387 |
-
|
|
|
388 |
|
389 |
-
# Launch the Gradio
|
390 |
-
demo.launch()
|
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
|
|
3 |
|
4 |
+
# CSS to hide footer and customize button
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
css = """
|
6 |
footer {display:none !important}
|
7 |
.output-markdown{display:none !important}
|
|
|
66 |
--tw-text-opacity: 1 !important;
|
67 |
color:rgb(37 56 133 / var(--tw-text-opacity)) !important;
|
68 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
69 |
"""
|
70 |
|
71 |
# Initialize the InferenceClient for chatbot
|
72 |
+
client = InferenceClient("HuggingFaceH4/zephyr-7b-alpha")
|
|
|
|
|
|
|
73 |
|
74 |
# Define the function for chatbot response
|
75 |
def respond(
|
|
|
80 |
temperature,
|
81 |
top_p,
|
82 |
):
|
|
|
|
|
83 |
messages = [{"role": "system", "content": system_message}]
|
84 |
|
85 |
for val in history:
|
|
|
87 |
messages.append({"role": "user", "content": val[0]})
|
88 |
if val[1]:
|
89 |
messages.append({"role": "assistant", "content": val[1]})
|
90 |
+
|
|
|
91 |
messages.append({"role": "user", "content": message})
|
92 |
+
|
|
|
93 |
response = ""
|
94 |
|
95 |
for message in client.chat_completion(
|
|
|
103 |
response += token
|
104 |
yield response
|
105 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
106 |
def send_message(message, history, system_message, max_tokens, temperature, top_p):
|
107 |
if message:
|
108 |
history.append((message, ""))
|
|
|
117 |
response_text = ""
|
118 |
for r in response:
|
119 |
response_text = r
|
120 |
+
history[-1] = (message, response_text)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
121 |
return history, gr.update(value="")
|
122 |
|
123 |
+
# Description for the chatbot
|
124 |
+
description = """
|
125 |
+
Hello! I'm here to support you emotionally and answer any questions. How are you feeling today?
|
126 |
+
<div style='color: green;'>Developed by Hashir Ehtisham</div>
|
127 |
+
"""
|
128 |
+
|
129 |
+
# Motivational tagline for the new tab
|
130 |
+
motivational_tagline = """
|
131 |
+
Welcome to the Motivational Quotes tab! Let’s ignite your day with some inspiration. What do you need motivation for today?
|
132 |
+
<div style='color: green;'>Developed by Hashir Ehtisham</div>
|
133 |
+
"""
|
134 |
+
|
135 |
+
# Emotions Detector tagline for the new tab
|
136 |
+
emotions_detector_tagline = """
|
137 |
+
Know how your message sounds and how to improve the tone of the message with Emotions Detector.
|
138 |
+
<div style='color: green;'>Developed by Hashir Ehtisham</div>
|
139 |
+
"""
|
140 |
+
|
141 |
+
# Jokes tagline for the new tab
|
142 |
+
jokes_tagline = """
|
143 |
+
Ready for a good laugh? Ask me for a joke to lighten up your mood!
|
144 |
+
<div style='color: green;'>Developed by Hashir Ehtisham</div>
|
145 |
+
"""
|
146 |
|
147 |
+
# Define the Gradio Blocks interface
|
148 |
with gr.Blocks(css=css) as demo:
|
149 |
+
with gr.Tab("Emotional Support Chatbot"):
|
150 |
+
gr.Markdown("# Emotional Support Chatbot")
|
151 |
+
gr.Markdown(description)
|
|
|
152 |
|
153 |
+
system_message = gr.Textbox(value="You are a friendly Emotional Support Chatbot.", visible=False)
|
154 |
+
chatbot = gr.Chatbot()
|
155 |
+
msg = gr.Textbox(label="Your message")
|
156 |
+
clear = gr.Button("Clear")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
157 |
|
158 |
+
with gr.Accordion("Additional Inputs", open=False):
|
159 |
+
max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
|
160 |
+
temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
|
161 |
+
top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
162 |
|
163 |
def respond_wrapper(message, chat_history, system_message_val, max_tokens_val, temperature_val, top_p_val):
    """Gradio submit callback for the Emotional Support tab.

    Forwards the user's message (plus the hidden system prompt and sampling
    sliders) to send_message, then returns a pair matching the callback's
    outputs: an update that clears the input textbox, and the refreshed
    chat history for the Chatbot component.
    """
    # send_message returns (history, textbox_update); only the history is
    # needed here because we build our own clearing update below.
    updated_history, _unused = send_message(
        message,
        chat_history,
        system_message_val,
        max_tokens=max_tokens_val,
        temperature=temperature_val,
        top_p=top_p_val,
    )
    return gr.update(value=""), updated_history
173 |
|
174 |
+
msg.submit(respond_wrapper, [msg, chatbot, system_message, max_tokens, temperature, top_p], [msg, chatbot])
|
175 |
+
clear.click(lambda: None, None, chatbot, queue=False)
|
176 |
+
|
177 |
+
with gr.Tab("Motivational Quotes"):
|
178 |
+
gr.Markdown("# Motivational Quotes")
|
179 |
+
gr.Markdown(motivational_tagline)
|
180 |
+
|
181 |
+
system_message_motivational = gr.Textbox(value="You are a friendly Motivational Quotes Chatbot.", visible=False)
|
182 |
+
chatbot_motivational = gr.Chatbot()
|
183 |
+
msg_motivational = gr.Textbox(label="Your message")
|
184 |
+
clear_motivational = gr.Button("Clear")
|
185 |
|
186 |
with gr.Accordion("Additional Inputs", open=False):
|
187 |
+
max_tokens_motivational = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
|
188 |
+
temperature_motivational = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
|
189 |
+
top_p_motivational = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
|
190 |
|
191 |
+
def respond_wrapper_motivational(message, chat_history, system_message_val, max_tokens_val, temperature_val, top_p_val):
|
192 |
chat_history, _ = send_message(
|
193 |
message=message,
|
194 |
history=chat_history,
|
|
|
197 |
temperature=temperature_val,
|
198 |
top_p=top_p_val,
|
199 |
)
|
200 |
+
return gr.update(value=""), chat_history
|
201 |
|
202 |
+
msg_motivational.submit(respond_wrapper_motivational, [msg_motivational, chatbot_motivational, system_message_motivational, max_tokens_motivational, temperature_motivational, top_p_motivational], [msg_motivational, chatbot_motivational])
|
203 |
+
clear_motivational.click(lambda: None, None, chatbot_motivational, queue=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
204 |
|
205 |
+
with gr.Tab("Emotions Detector"):
|
206 |
+
gr.Markdown("# Emotions Detector")
|
207 |
+
gr.Markdown(emotions_detector_tagline)
|
208 |
+
|
209 |
+
system_message_emotions = gr.Textbox(value="You are an Emotions Detector Chatbot. Analyze the tone of the message (happy, sad, angry, neutral) and answer back.", visible=False)
|
210 |
+
chatbot_emotions = gr.Chatbot()
|
211 |
+
msg_emotions = gr.Textbox(label="Your message")
|
212 |
+
clear_emotions = gr.Button("Clear")
|
213 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
214 |
with gr.Accordion("Additional Inputs", open=False):
|
215 |
+
max_tokens_emotions = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
|
216 |
+
temperature_emotions = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
|
217 |
+
top_p_emotions = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
|
218 |
|
219 |
+
def respond_wrapper_emotions(message, chat_history, system_message_val, max_tokens_val, temperature_val, top_p_val):
|
220 |
chat_history, _ = send_message(
|
221 |
message=message,
|
222 |
history=chat_history,
|
|
|
225 |
temperature=temperature_val,
|
226 |
top_p=top_p_val,
|
227 |
)
|
228 |
+
return gr.update(value=""), chat_history
|
229 |
|
230 |
+
msg_emotions.submit(respond_wrapper_emotions, [msg_emotions, chatbot_emotions, system_message_emotions, max_tokens_emotions, temperature_emotions, top_p_emotions], [msg_emotions, chatbot_emotions])
|
231 |
+
clear_emotions.click(lambda: None, None, chatbot_emotions, queue=False)
|
232 |
+
|
233 |
+
with gr.Tab("Jokes for You"):
|
234 |
+
gr.Markdown("# Jokes for You")
|
235 |
+
gr.Markdown(jokes_tagline)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
236 |
|
237 |
+
system_message_jokes = gr.Textbox(value="You are a friendly Jokes Chatbot. Provide a joke when asked.", visible=False)
|
238 |
+
chatbot_jokes = gr.Chatbot()
|
239 |
+
msg_jokes = gr.Textbox(label="Your message")
|
240 |
+
clear_jokes = gr.Button("Clear")
|
241 |
|
242 |
+
with gr.Accordion("Examples", open=False):
|
243 |
+
gr.Examples(
|
244 |
+
examples=[
|
245 |
+
["Tell me a joke"],
|
246 |
+
["Make me laugh"],
|
247 |
+
["Say something funny"],
|
248 |
+
],
|
249 |
+
inputs=msg_jokes,
|
250 |
+
)
|
251 |
+
|
252 |
with gr.Accordion("Additional Inputs", open=False):
|
253 |
+
max_tokens_jokes = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
|
254 |
+
temperature_jokes = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
|
255 |
+
top_p_jokes = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
|
256 |
+
|
257 |
+
def respond_wrapper_jokes(message, chat_history, system_message_val, max_tokens_val, temperature_val, top_p_val):
|
258 |
chat_history, _ = send_message(
|
259 |
message=message,
|
260 |
history=chat_history,
|
|
|
263 |
temperature=temperature_val,
|
264 |
top_p=top_p_val,
|
265 |
)
|
266 |
+
return gr.update(value=""), chat_history
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
267 |
|
268 |
+
msg_jokes.submit(respond_wrapper_jokes, [msg_jokes, chatbot_jokes, system_message_jokes, max_tokens_jokes, temperature_jokes, top_p_jokes], [msg_jokes, chatbot_jokes])
|
269 |
+
clear_jokes.click(lambda: None, None, chatbot_jokes, queue=False)
|
270 |
|
271 |
+
# Launch the Gradio interface
|
272 |
+
demo.launch()
|