import gradio as gr
import os
import time

from cerebras.cloud.sdk import Cerebras

# Set up the Cerebras client
client = Cerebras(api_key=os.getenv("CEREBRAS_API_KEY"))


def chat_with_cerebras(user_input):
    """
    Handles interaction with the Cerebras model.
    Sends user input and returns the model's response along with
    compute time and chain-of-thought reasoning.
    """
    # Start compute time measurement
    start_time = time.time()

    try:
        # Create a chat stream with Cerebras
        stream = client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": (
                        "You are IntellijMind, an advanced AI designed to assist users "
                        "with detailed insights, problem-solving, and chain-of-thought reasoning."
                    ),
                },
                {"role": "user", "content": user_input},
            ],
            model="llama-3.3-70b",
            stream=True,
            max_completion_tokens=1024,
            temperature=0.2,
            top_p=1,
        )

        # Collect the response from the stream
        response = ""
        chain_of_thought = ""
        for chunk in stream:
            content = chunk.choices[0].delta.content
            if content:
                response += content
                if "Chain of Thought:" in content:
                    chain_of_thought += content.split("Chain of Thought:", 1)[-1]

        # End compute time measurement
        compute_time = time.time() - start_time
        return response, chain_of_thought, f"Compute Time: {compute_time:.2f} seconds"

    except Exception as e:
        return "Error: Unable to process your request.", "", str(e)


# Gradio interface
def gradio_ui():
    with gr.Blocks() as demo:
        gr.Markdown(
            """# 🚀 IntellijMind: The Future of AI Chatbots
Experience the most advanced chatbot for deep insights, chain-of-thought reasoning, and unmatched clarity!"""
        )

        with gr.Row():
            with gr.Column(scale=6):
                chat_history = gr.Chatbot(label="Chat History")
            with gr.Column(scale=2):
                compute_time = gr.Textbox(label="Compute Time", interactive=False)
                chain_of_thought_display = gr.Textbox(
                    label="Chain of Thought", interactive=False, lines=10
                )

        user_input = gr.Textbox(
            label="Type your message", placeholder="Ask me anything...", lines=2
        )

        send_button = gr.Button("Send", variant="primary")
        clear_button = gr.Button("Clear Chat")

        def handle_chat(chat_history, user_input):
            ai_response, chain_of_thought, compute_info = chat_with_cerebras(user_input)
            chat_history.append((user_input, ai_response))
            return chat_history, chain_of_thought, compute_info

        def clear_chat():
            return [], "", ""

        send_button.click(
            handle_chat,
            inputs=[chat_history, user_input],
            outputs=[chat_history, chain_of_thought_display, compute_time],
        )
        clear_button.click(
            clear_chat,
            outputs=[chat_history, chain_of_thought_display, compute_time],
        )

        gr.Markdown(
            """---
### 🌟 Features:
- **Advanced Reasoning**: Chain-of-thought explanations for complex queries.
- **Real-Time Performance Metrics**: Measure response compute time instantly.
- **Insightful Chain of Thought**: See the reasoning process behind AI decisions.
- **User-Friendly Design**: Intuitive chatbot interface with powerful features.
- **Powered by IntellijMind**: Setting new standards for AI interaction.
"""
        )

    return demo


# Run the Gradio app
demo = gradio_ui()
demo.launch()
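
# --- Usage sketch (assumptions: the script is saved as app.py and run locally;
# adjust the filename to your setup) ---
#
#   export CEREBRAS_API_KEY="your-api-key"   # required by the client created above
#   python app.py                            # launches the Gradio UI (default: http://127.0.0.1:7860)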