import gradio as gr
import os
import time
from cerebras.cloud.sdk import Cerebras

# Set up the Cerebras client
api_key = os.getenv("CEREBRAS_API_KEY")
if not api_key:
    raise ValueError("CEREBRAS_API_KEY environment variable is not set.")
client = Cerebras(api_key=api_key)
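
# The API key is read from the environment above; when running locally (an
# assumption about your setup) it can be provided with e.g.
#   export CEREBRAS_API_KEY=<your-key>
# or configured as a secret when deployed on Hugging Face Spaces.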
def chat_with_cerebras(user_input):
    """
    Handles interaction with the Cerebras model.

    Sends the user input and returns the model's response, any chain-of-thought
    text it contains, the compute time, and an approximate token count.
    """
    # Start compute-time measurement
    start_time = time.time()
    try:
        # Create a streaming chat completion with Cerebras
        stream = client.chat.completions.create(
            messages=[
                {
                    "role": "system",
                    "content": (
                        "You are IntellijMind, an advanced AI designed to assist users with "
                        "detailed insights, problem-solving, and chain-of-thought reasoning."
                    ),
                },
                {"role": "user", "content": user_input},
            ],
            model="llama-3.3-70b",
            stream=True,
            max_completion_tokens=1024,
            temperature=0.2,
            top_p=1,
        )

        # Collect the full response from the stream
        response = ""
        for chunk in stream:
            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                response += chunk.choices[0].delta.content

        # Extract the chain-of-thought section after the stream has finished, so the
        # marker is detected even when it is split across streamed chunks.
        chain_of_thought = ""
        if "Chain of Thought:" in response:
            chain_of_thought = response.split("Chain of Thought:", 1)[-1].strip()

        # End compute-time measurement
        compute_time = time.time() - start_time

        # Approximate token usage by word count (placeholder for real usage reporting)
        token_usage = len(user_input.split()) + len(response.split())

        return response, chain_of_thought, f"Compute Time: {compute_time:.2f} seconds", f"Tokens used: {token_usage}"
    except Exception as e:
        return "Error: Unable to process your request.", "", str(e), ""
# Gradio interface
def gradio_ui():
    with gr.Blocks() as demo:
        gr.Markdown(
            """# 🚀 IntellijMind: The Future of AI Chatbots
Experience the most advanced chatbot for deep insights, chain-of-thought reasoning, and unmatched clarity!"""
        )

        with gr.Row():
            with gr.Column(scale=6):
                chat_history = gr.Chatbot(label="Chat History")
            with gr.Column(scale=2):
                compute_time = gr.Textbox(label="Compute Time", interactive=False)
                chain_of_thought_display = gr.Textbox(label="Chain of Thought", interactive=False, lines=10)
                token_usage_display = gr.Textbox(label="Token Usage", interactive=False)

        user_input = gr.Textbox(label="Type your message", placeholder="Ask me anything...", lines=2)

        with gr.Row():
            send_button = gr.Button("Send", variant="primary")
            clear_button = gr.Button("Clear Chat")
            export_button = gr.Button("Export Chat History")
        def handle_chat(chat_history, user_input):
            if not user_input.strip():
                # Return one value per output component: chat history, chain of thought,
                # compute time (used here for the status message), and token usage.
                return chat_history, "", "Please enter a valid message.", ""
            ai_response, chain_of_thought, compute_info, token_usage = chat_with_cerebras(user_input)
            chat_history.append((user_input, ai_response))
            return chat_history, chain_of_thought, compute_info, token_usage
        def clear_chat():
            return [], "", "", ""
        def export_chat(chat_history):
            if not chat_history:
                return "No chat history to export.", ""
            chat_text = "\n".join(f"User: {user}\nAI: {ai}" for user, ai in chat_history)
            filename = f"chat_history_{int(time.time())}.txt"
            with open(filename, "w") as file:
                file.write(chat_text)
            return f"Chat history exported to {filename}.", ""
        send_button.click(
            handle_chat,
            inputs=[chat_history, user_input],
            outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display],
        )
        clear_button.click(
            clear_chat,
            outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display],
        )
        export_button.click(
            export_chat,
            inputs=[chat_history],
            outputs=[compute_time, chain_of_thought_display],
        )
        user_input.submit(
            handle_chat,
            inputs=[chat_history, user_input],
            outputs=[chat_history, chain_of_thought_display, compute_time, token_usage_display],
        )
gr.Markdown("""---\n### 🌟 Features:\n- **Advanced Reasoning**: Chain-of-thought explanations for complex queries.\n- **Real-Time Performance Metrics**: Measure response compute time instantly.\n- **Token Usage Tracking**: Monitor token usage per response for transparency.\n- **Export Chat History**: Save your conversation as a text file for future reference.\n- **User-Friendly Design**: Intuitive chatbot interface with powerful features.\n- **Insightful Chain of Thought**: See the reasoning process behind AI decisions.\n- **Submit on Enter**: Seamless interaction with keyboard support.\n""")
return demo
# Run the Gradio app
demo = gradio_ui()
demo.launch()
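
# If deploying outside Hugging Face Spaces (an assumption about the target
# environment), Gradio's launch options can be adjusted here, e.g.
# demo.launch(server_name="0.0.0.0") to listen on all interfaces or
# demo.launch(share=True) for a temporary public link.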