Upload folder using huggingface_hub
README.md CHANGED
@@ -5,7 +5,7 @@ emoji: 🔥
 colorFrom: indigo
 colorTo: indigo
 sdk: gradio
-sdk_version: 5.
+sdk_version: 5.13.0
 app_file: run.py
 pinned: false
 hf_oauth: true
run.ipynb CHANGED
@@ -1 +1 @@
-{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_thoughts"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from gradio import ChatMessage\n", "import time\n", "\n", "def simulate_thinking_chat(message: str, history: list):\n", "
+{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_thoughts"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from gradio import ChatMessage\n", "import time\n", "\n", "def simulate_thinking_chat(message: str, history: list):\n", "    history.append(\n", "        ChatMessage(\n", "            role=\"assistant\",\n", "            content=\"\",\n", "            metadata={\"title\": \"Thinking... \", \"log\": \"Starting analysis\"}\n", "        )\n", "    )\n", "    time.sleep(0.5)\n", "    yield history\n", "\n", "    thoughts = [\n", "        \"First, I need to understand the core aspects of the query...\",\n", "        \"Now, considering the broader context and implications...\",\n", "        \"Analyzing potential approaches to formulate a comprehensive answer...\",\n", "        \"Finally, structuring the response for clarity and completeness...\"\n", "    ]\n", "\n", "    accumulated_thoughts = \"\"\n", "\n", "    for i, thought in enumerate(thoughts):\n", "        time.sleep(0.5)\n", "\n", "        accumulated_thoughts += f\"- {thought}\\n\\n\"\n", "\n", "        history[-1] = ChatMessage(\n", "            role=\"assistant\",\n", "            content=accumulated_thoughts.strip(),\n", "            metadata={\n", "                \"title\": \"Thinking...\",\n", "                \"log\": f\"Step {i+1} completed.\",\n", "                \"duration\": 0.5 * (i + 1)\n", "            }\n", "        )\n", "        yield history\n", "\n", "    history.append(\n", "        ChatMessage(\n", "            role=\"assistant\",\n", "            content=\"Based on my thoughts and analysis above, my response is: This dummy repro shows how thoughts of a thinking LLM can be progressively shown before providing its final answer.\"\n", "        )\n", "    )\n", "    yield history\n", "\n", "with gr.Blocks() as demo:\n", "    gr.Markdown(\"# Thinking LLM Demo \ud83e\udd14\")\n", "    chatbot = gr.Chatbot(type=\"messages\", render_markdown=True)\n", "    msg = gr.Textbox(placeholder=\"Type your message...\")\n", "\n", "    msg.submit(\n", "        lambda m, h: (m, h + [ChatMessage(role=\"user\", content=m)]),\n", "        [msg, chatbot],\n", "        [msg, chatbot]\n", "    ).then(\n", "        simulate_thinking_chat,\n", "        [msg, chatbot],\n", "        chatbot\n", "    )\n", "\n", "if __name__ == \"__main__\":\n", "    demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
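The whole effect in both files hinges on the `metadata` dict of `gr.ChatMessage`: on this Gradio 5.x line, a message with a `"title"` renders in `gr.Chatbot(type="messages")` as a collapsible "thought" bubble, `"log"` appears as a low-emphasis caption beside the title, and `"duration"` is displayed as elapsed seconds. A stripped-down sketch of just that piece:

```python
from gradio import ChatMessage

# One "thought" message as the demo builds it; the metadata keys control
# how gr.Chatbot(type="messages") renders it.
thought = ChatMessage(
    role="assistant",
    content="- First observation...\n\n- Second observation...",
    metadata={
        "title": "Thinking...",      # collapsible header for the thought bubble
        "log": "Step 2 completed.",  # small caption shown next to the title
        "duration": 1.0,             # rendered as elapsed time in seconds
    },
)
```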
run.py CHANGED
@@ -3,60 +3,54 @@ from gradio import ChatMessage
 import time
 
 def simulate_thinking_chat(message: str, history: list):
-
-
-
-
-
-            role="assistant",  # Specifies this is from the assistant
-            content="",  # Initially empty content
-            metadata={"title": "Thinking... "}  # Setting a thinking header here
+    history.append(
+        ChatMessage(
+            role="assistant",
+            content="",
+            metadata={"title": "Thinking... ", "log": "Starting analysis"}
         )
     )
     time.sleep(0.5)
-    yield history
-
-    # Define the thoughts that LLM will "think" through
+    yield history
+
     thoughts = [
         "First, I need to understand the core aspects of the query...",
         "Now, considering the broader context and implications...",
         "Analyzing potential approaches to formulate a comprehensive answer...",
         "Finally, structuring the response for clarity and completeness..."
     ]
-
-    # Variable to store all thoughts as they accumulate
+
     accumulated_thoughts = ""
-
-
-
-
-
-
-
-
-        # Update the thinking message with all thoughts so far
-        history[-1] = ChatMessage(  # Updates last message in history
+
+    for i, thought in enumerate(thoughts):
+        time.sleep(0.5)
+
+        accumulated_thoughts += f"- {thought}\n\n"
+
+        history[-1] = ChatMessage(
             role="assistant",
-            content=accumulated_thoughts.strip(),
-            metadata={
+            content=accumulated_thoughts.strip(),
+            metadata={
+                "title": "Thinking...",
+                "log": f"Step {i+1} completed.",
+                "duration": 0.5 * (i + 1)
+            }
         )
-        yield history
-
-    # After thinking is complete, adding the final response
+        yield history
+
     history.append(
         ChatMessage(
             role="assistant",
             content="Based on my thoughts and analysis above, my response is: This dummy repro shows how thoughts of a thinking LLM can be progressively shown before providing its final answer."
         )
     )
-    yield history
+    yield history
 
-# Gradio blocks with gr.chatbot
 with gr.Blocks() as demo:
     gr.Markdown("# Thinking LLM Demo 🤔")
     chatbot = gr.Chatbot(type="messages", render_markdown=True)
     msg = gr.Textbox(placeholder="Type your message...")
-
+
     msg.submit(
         lambda m, h: (m, h + [ChatMessage(role="user", content=m)]),
         [msg, chatbot],
@@ -68,4 +62,4 @@ with gr.Blocks() as demo:
     )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
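The version above sleeps and yields canned strings; porting it to a real reasoning model only means swapping the `thoughts` list for a token stream while keeping the same shape: one thought message that grows, then a separate final answer. A minimal sketch under that assumption — `stream_reasoning` and `stream_answer` are hypothetical stand-ins for an actual model client, not part of this Space or of Gradio:

```python
import time
from gradio import ChatMessage

def respond(message: str, history: list):
    # Hypothetical generators yielding text chunks; replace with a real
    # client that separates reasoning tokens from answer tokens.
    def stream_reasoning(msg):
        yield from ["Parsing the question... ", "Weighing possible answers... "]

    def stream_answer(msg):
        yield from ["Here is ", "the final answer."]

    start = time.time()
    # One assistant message holds the growing chain of thought.
    history.append(
        ChatMessage(role="assistant", content="",
                    metadata={"title": "Thinking..."})
    )
    for chunk in stream_reasoning(message):
        history[-1].content += chunk
        yield history

    # Stamp the thought bubble with the real elapsed time, where the demo
    # uses its fixed 0.5 * (i + 1) estimate.
    history[-1].metadata["duration"] = round(time.time() - start, 1)

    # The final answer goes in a separate, metadata-free message.
    history.append(ChatMessage(role="assistant", content=""))
    for chunk in stream_answer(message):
        history[-1].content += chunk
        yield history
```

Wired into the Blocks above, `respond` would slot into the `msg.submit(...).then(...)` chain in place of `simulate_thinking_chat`, unchanged.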