import gradio as gr
from gradio import ChatMessage
import time

def simulate_thinking_chat(message: str, history: list):
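    """Stream simulated "thinking" steps into a collapsible block, then append the final answer."""
    # A ChatMessage whose metadata contains a "title" is rendered by the Chatbot
    # as a collapsible "thought" panel; start with an empty placeholder that the
    # loop below fills in progressively.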
    history.append(
        ChatMessage(
            role="assistant",
            content="",
            metadata={"title": "Thinking... ", "log": "Starting analysis"}
        )
    )
    time.sleep(0.5)
    yield history

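    # Canned "thoughts" standing in for a real model's intermediate reasoning.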
    thoughts = [
        "First, I need to understand the core aspects of the query...",
        "Now, considering the broader context and implications...",
        "Analyzing potential approaches to formulate a comprehensive answer...",
        "Finally, structuring the response for clarity and completeness..."
    ]

    accumulated_thoughts = ""

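    # Reveal the thoughts one by one, rewriting the same placeholder message so
    # its content grows with each step.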
    for i, thought in enumerate(thoughts):
        time.sleep(0.5)

        accumulated_thoughts += f"- {thought}\n\n"

        history[-1] = ChatMessage(
            role="assistant",
            content=accumulated_thoughts.strip(),
            metadata={
                "title": "Thinking...",
                "log": f"Step {i+1} completed.",
                "duration": 0.5 * (i + 1)
            }
        )
        yield history

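    # Thinking is done; append the final answer as a regular assistant message.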
    history.append(
        ChatMessage(
            role="assistant",
            content="Based on my thoughts and analysis above, my response is: This dummy repro shows how thoughts of a thinking LLM can be progressively shown before providing its final answer."
        )
    )
    yield history

with gr.Blocks() as demo:
    gr.Markdown("# Thinking LLM Demo 🤔")
    chatbot = gr.Chatbot(type="messages", render_markdown=True)
    msg = gr.Textbox(placeholder="Type your message...")

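    # Two-step chain: first echo the user's message into the chat history,
    # then stream the assistant's thinking steps and final answer.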
    msg.submit(
        lambda m, h: (m, h + [ChatMessage(role="user", content=m)]),
        [msg, chatbot],
        [msg, chatbot]
    ).then(
        simulate_thinking_chat,
        [msg, chatbot],
        chatbot
    )

if __name__ == "__main__":
    demo.launch()