import gradio as gr
import os
import time
from cerebras.cloud.sdk import Cerebras
import markdown

# Set up the Cerebras client
client = Cerebras(api_key=os.getenv("CEREBRAS_API_KEY"))
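# Note: this assumes the CEREBRAS_API_KEY environment variable is set (for example
# as a secret in the Space settings); if it is missing, every request will fail.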

def chat_with_cerebras(user_input, system_prompt, model, temperature, top_p, max_completion_tokens):
    """
    Handles interaction with the Cerebras model.
    Sends user input and returns the model's response along with the compute time
    and any chain-of-thought reasoning contained in the output.
    """
    # Start compute time measurement
    start_time = time.time()

    try:
        # Create a streaming chat completion with Cerebras
        stream = client.chat.completions.create(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_input}
            ],
            model=model,
            stream=True,
            max_completion_tokens=max_completion_tokens,
            temperature=temperature,
            top_p=top_p
        )

        # Collect the full response from the stream
        response = ""
        for chunk in stream:
            if chunk.choices[0].delta.content:
                response += chunk.choices[0].delta.content

        # Split out the chain-of-thought section after streaming completes, so the
        # marker is detected even if it arrives split across chunks
        chain_of_thought = ""
        if "Chain of Thought:" in response:
            response, chain_of_thought = response.split("Chain of Thought:", 1)
            response = response.rstrip()
            chain_of_thought = chain_of_thought.strip()

        # End compute time measurement
        compute_time = time.time() - start_time

        # Improved formatting for chain of thought
        formatted_response = response
        if chain_of_thought:
            formatted_response += f"\n\n**Chain of Thought:**\n{chain_of_thought}"

        return formatted_response, chain_of_thought, f"Compute Time: {compute_time:.2f} seconds"

    except Exception as e:
        return f"Error: {str(e)}", "", "An error occurred. Please check your API key or the Cerebras service."

# Gradio interface
def gradio_ui():
    with gr.Blocks() as demo:
        gr.Markdown("""# IntellijMind Release 1st\nExperience the most advanced chatbot for deep insights and unmatched clarity!""")

        with gr.Row():
            with gr.Column(scale=6):
                chat_history = gr.Chatbot(label="Chat History")
            with gr.Column(scale=2):
                compute_time = gr.Textbox(label="Compute Time", interactive=False)
                chain_of_thought_display = gr.Textbox(label="Chain of Thought", interactive=False, lines=10)

        user_input = gr.Textbox(label="Type your message", placeholder="Ask me anything...", lines=2)
        send_button = gr.Button("Send", variant="primary")
        clear_button = gr.Button("Clear Chat")

        # Default values for system prompt, model, and sampling parameters
        default_system_prompt = (
            "You are IntellijMind, an advanced AI designed to assist users with detailed insights, "
            "problem-solving, and chain-of-thought reasoning. Provide your answers in markdown format. "
            "If you do not know the answer, mention that you do not know and don't make things up."
        )
        default_model = "llama-3.3-70b"
        default_temperature = 0.2
        default_top_p = 1
        default_max_tokens = 1024

        def handle_chat(chat_history, user_input):
            # Show the user message immediately with a placeholder while the model responds
            chat_history.append((user_input, None))
            yield chat_history, "", "Thinking..."
            ai_response, chain_of_thought, compute_info = chat_with_cerebras(
                user_input, default_system_prompt, default_model,
                default_temperature, default_top_p, default_max_tokens
            )
            # Render the markdown output to HTML for display in the chatbot
            chat_history[-1] = (user_input, markdown.markdown(ai_response))
            yield chat_history, chain_of_thought, compute_info

        def clear_chat():
            return [], "", ""

        send_button.click(
            handle_chat,
            inputs=[chat_history, user_input],
            outputs=[chat_history, chain_of_thought_display, compute_time]
        )
        clear_button.click(clear_chat, outputs=[chat_history, chain_of_thought_display, compute_time])

        gr.Markdown("""---\n### Features:\n- **Advanced Reasoning**: Chain-of-thought explanations for complex queries.\n- **Real-Time Performance Metrics**: Measure response compute time instantly.\n- **Insightful Chain of Thought**: See the reasoning process behind AI decisions.\n- **User-Friendly Design**: Intuitive chatbot interface with powerful features.\n- **Powered by IntellijMind Release 1st**: Setting new standards for AI interaction.\n""")

    return demo

# Run the Gradio app
demo = gradio_ui()
demo.launch()
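# When running locally, a public share link can be requested with demo.launch(share=True);
# on Hugging Face Spaces the plain launch() call above is sufficient.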