import gradio as gr
import os
import time
from cerebras.cloud.sdk import Cerebras
import markdown

# Set up the Cerebras client.
# Reads the API key from the CEREBRAS_API_KEY environment variable; if it is
# unset, api_key is None here — presumably requests then fail at call time
# with an auth error (TODO confirm against the SDK's behavior).
client = Cerebras(api_key=os.getenv("CEREBRAS_API_KEY"))

def chat_with_cerebras(user_input, system_prompt, model, temperature, top_p, max_completion_tokens):
    """
    Send one user message to the Cerebras chat-completions API and collect
    the streamed reply.

    Parameters:
        user_input: The user's message text.
        system_prompt: System prompt sent as the first message.
        model: Cerebras model identifier (e.g. "llama-3.3-70b").
        temperature, top_p, max_completion_tokens: Sampling parameters
            forwarded unchanged to the API.

    Returns:
        (formatted_response, chain_of_thought, compute_time_message).
        On failure, returns (error_message, "", guidance_message) instead
        of raising, so the UI can display the problem.
    """
    # Start compute time measurement
    start_time = time.time()

    try:
        # Create a chat stream with Cerebras
        stream = client.chat.completions.create(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_input}
            ],
            model=model,
            stream=True,
            max_completion_tokens=max_completion_tokens,
            temperature=temperature,
            top_p=top_p
        )

        # Accumulate the full streamed text first. delta.content can be
        # None on some chunks (e.g. role-only deltas); the original code
        # guarded only the first check and then crashed with a TypeError on
        # the unguarded `in` test, so guard once here.
        full_text = ""
        for chunk in stream:
            content = chunk.choices[0].delta.content
            if content:
                full_text += content

        # End compute time measurement
        compute_time = time.time() - start_time

        # Split out the chain-of-thought section exactly once, after the
        # stream finishes. Per-chunk splitting missed reasoning text that
        # arrived in later chunks without the marker, and left the marker
        # text duplicated inside the visible response.
        response, _, chain_of_thought = full_text.partition("Chain of Thought:")
        response = response.rstrip()
        chain_of_thought = chain_of_thought.strip()

        # Improved formatting for chain of thought
        formatted_response = response
        if chain_of_thought:
            formatted_response += f"\n\n**Chain of Thought:**\n{chain_of_thought}"

        return formatted_response, chain_of_thought, f"Compute Time: {compute_time:.2f} seconds"

    except Exception as e:
        # Surface the failure to the UI rather than raising.
        return f"Error: {str(e)}", "", "An error occurred. Please check your API key or the Cerebras service."

# Gradio interface
def gradio_ui():
    """
    Build and return the Gradio Blocks UI for the chatbot.

    Layout: a chat-history pane, a side panel with compute time and
    chain-of-thought, a message box, and Send / Clear buttons. Model and
    sampling defaults are kept local so the UI is self-contained.

    Returns:
        The constructed gr.Blocks instance (not launched).
    """
    with gr.Blocks() as demo:
        gr.Markdown("""# 🚀 IntellijMind Release 1st \nExperience the most advanced chatbot for deep insights and unmatched clarity!""")

        with gr.Row():
            with gr.Column(scale=6):
                chat_history = gr.Chatbot(label="Chat History")
            with gr.Column(scale=2):
                compute_time = gr.Textbox(label="Compute Time", interactive=False)
                chain_of_thought_display = gr.Textbox(label="Chain of Thought", interactive=False, lines=10)

        user_input = gr.Textbox(label="Type your message", placeholder="Ask me anything...", lines=2)
        send_button = gr.Button("Send", variant="primary")
        clear_button = gr.Button("Clear Chat")

        # Set default values for system prompt, model, etc.
        default_system_prompt = "You are IntellijMind, an advanced AI designed to assist users with detailed insights, problem-solving, and chain-of-thought reasoning. Provide your answers in markdown format. If you do not know the answer, mention that you do not know and don't make things up."
        default_model = "llama-3.3-70b"
        default_temperature = 0.2
        default_top_p = 1
        default_max_tokens = 1024

        def handle_chat(chat_history, user_input):
            """Generator handler: echo the user turn immediately, then stream in the reply."""
            chat_history.append((user_input, None))
            # First yield: show the user message, clear the input box
            # (previously the typed text lingered after sending), and show
            # an interim status while the model responds.
            yield chat_history, "", "", "Thinking..."

            ai_response, chain_of_thought, compute_info = chat_with_cerebras(user_input, default_system_prompt, default_model, default_temperature, default_top_p, default_max_tokens)

            chat_history[-1] = (user_input, markdown.markdown(ai_response))  # render markdown output to HTML
            yield chat_history, "", chain_of_thought, compute_info

        def clear_chat():
            """Reset every output component to its empty state."""
            return [], "", "", ""

        # Shared output list: includes user_input so handlers can clear it.
        chat_outputs = [chat_history, user_input, chain_of_thought_display, compute_time]

        send_button.click(
            handle_chat,
            inputs=[chat_history, user_input],
            outputs=chat_outputs
        )
        # Allow submitting with Enter in the textbox as well as the button.
        user_input.submit(
            handle_chat,
            inputs=[chat_history, user_input],
            outputs=chat_outputs
        )

        clear_button.click(clear_chat, outputs=chat_outputs)

        gr.Markdown("""---\n### 🌟 Features:\n- **Advanced Reasoning**: Chain-of-thought explanations for complex queries.\n- **Real-Time Performance Metrics**: Measure response compute time instantly.\n- **Insightful Chain of Thought**: See the reasoning process behind AI decisions.\n- **User-Friendly Design**: Intuitive chatbot interface with powerful features.\n- **Powered by IntellijMind Release 1st**: Setting new standards for AI interaction.\n""")

    return demo


# Build the app at module level so hosting platforms that import this file
# (e.g. Hugging Face Spaces) can find `demo`.
demo = gradio_ui()

if __name__ == "__main__":
    # Guard the launch so importing this module does not start a server
    # as a side effect.
    demo.launch()