Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -6,14 +6,6 @@ For more information on `huggingface_hub` Inference API support, please check th
|
|
6 |
"""
|
7 |
client = InferenceClient("suayptalha/arrLlama")
|
8 |
|
9 |
-
def history_to_messages(history: list[tuple[str, str]], system_message: str):
    """Build an OpenAI-style chat message list from Gradio history.

    The result starts with one system message, followed by the
    (user, assistant) turns of *history* in order; empty turns are dropped.
    """
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        messages.extend(
            {"role": role, "content": text}
            for role, text in (("user", user_turn), ("assistant", assistant_turn))
            if text
        )
    return messages
|
17 |
|
18 |
def respond(
|
19 |
message,
|
@@ -23,55 +15,50 @@ def respond(
|
|
23 |
temperature,
|
24 |
top_p,
|
25 |
):
|
26 |
-
messages =
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
27 |
messages.append({"role": "user", "content": message})
|
28 |
|
29 |
response = ""
|
30 |
|
31 |
-
for
|
32 |
messages,
|
33 |
max_tokens=max_tokens,
|
34 |
stream=True,
|
35 |
temperature=temperature,
|
36 |
top_p=top_p,
|
37 |
):
|
38 |
-
token =
|
|
|
39 |
response += token
|
40 |
yield response
|
41 |
|
42 |
-
with gr.Blocks() as demo:
|
43 |
-
gr.Markdown("""<center><font size=8>ArrLlama Health AI Assistant</center>""")
|
44 |
-
|
45 |
-
with gr.Row():
|
46 |
-
with gr.Column(scale=3):
|
47 |
-
system_input = gr.Textbox(
|
48 |
-
value="You are an AI assistant whose sole purpose is to inform the user about arrhythmia, monitor their heart rhythm, heart rate, arrhythmia risk, body temperature, and sweating, and provide advice based on these factors. If the user is at risk of arrhythmia, experiencing an arrhythmia episode, or going through an attack, you will give advice and explain what to do. You must not deviate from these topics.",
|
49 |
-
lines=3,
|
50 |
-
label="System Message"
|
51 |
-
)
|
52 |
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
65 |
|
66 |
-
textbox.submit(
|
67 |
-
respond,
|
68 |
-
inputs=[textbox, chatbot, system_input, max_tokens, temperature, top_p],
|
69 |
-
outputs=chatbot
|
70 |
-
)
|
71 |
-
|
72 |
-
gr.Button("🧹 Clear History").click(fn=clear_session, inputs=[], outputs=chatbot)
|
73 |
-
gr.Button("🛠️ Set System Message and Clear History").click(
|
74 |
-
fn=modify_system, inputs=[system_input], outputs=[system_input, chatbot]
|
75 |
-
)
|
76 |
|
|
|
77 |
demo.launch()
|
|
|
6 |
"""
|
7 |
client = InferenceClient("suayptalha/arrLlama")
|
8 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
|
10 |
def respond(
    message,
    history,
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream an assistant reply for the Gradio ChatInterface.

    Follows gr.ChatInterface's (message, history, *additional_inputs)
    calling convention: `history` is a list of (user, assistant) string
    pairs; `system_message`, `max_tokens`, `temperature`, `top_p` come from
    the UI controls. Yields the accumulated partial reply after each
    streamed token so the chat window updates incrementally.
    """
    # Rebuild the whole conversation in OpenAI chat format, skipping
    # empty turns (a half-finished pair has one empty side).
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    # BUG FIX: the original loop variable was `message`, shadowing the
    # user-input parameter of the same name; renamed to `chunk`.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # BUG FIX: the final/keep-alive stream chunk can carry
        # `content=None`; skip it instead of raising TypeError on
        # `response += None`.
        if token:
            response += token
        yield response
|
41 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
42 |
|
43 |
+
"""
|
44 |
+
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
|
45 |
+
"""
|
46 |
+
demo = gr.ChatInterface(
|
47 |
+
respond,
|
48 |
+
additional_inputs=[
|
49 |
+
gr.Textbox(value="You are an AI assistant whose sole purpose is to inform the user about arrhythmia, monitor their heart rhythm, heart rate, arrhythmia risk, body temperature, and sweating, and provide advice based on these factors. If the user is at risk of arrhythmia, experiencing an arrhythmia episode, or going through an attack, you will give advice and explain what to do. You must not deviate from these topics.", label="System message"),
|
50 |
+
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
|
51 |
+
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
|
52 |
+
gr.Slider(
|
53 |
+
minimum=0.1,
|
54 |
+
maximum=1.0,
|
55 |
+
value=0.95,
|
56 |
+
step=0.05,
|
57 |
+
label="Top-p (nucleus sampling)",
|
58 |
+
),
|
59 |
+
],
|
60 |
+
)
|
61 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
62 |
|
63 |
+
if __name__ == "__main__":
|
64 |
demo.launch()
|