Update app.py
app.py CHANGED
@@ -6,7 +6,7 @@ from cerebras.cloud.sdk import Cerebras
 # Set up the Cerebras client
 client = Cerebras(api_key=os.getenv("CEREBRAS_API_KEY"))
 
-def chat_with_cerebras(user_input):
+def chat_with_cerebras(user_input, system_prompt, model, temperature, top_p, max_completion_tokens):
     """
     Handles interaction with the Cerebras model.
     Sends user input and returns the model's response along with compute time and chain-of-thought reasoning.
@@ -18,14 +18,14 @@ def chat_with_cerebras(user_input):
         # Create a chat stream with Cerebras
         stream = client.chat.completions.create(
             messages=[
-                {"role": "system", "content":
+                {"role": "system", "content": system_prompt},
                 {"role": "user", "content": user_input}
             ],
-            model=
+            model=model,
             stream=True,
-            max_completion_tokens=
+            max_completion_tokens=max_completion_tokens,
-            temperature=
+            temperature=temperature,
-            top_p=
+            top_p=top_p
         )
 
         # Collect response from the stream
@@ -43,7 +43,7 @@ def chat_with_cerebras(user_input):
         return response, chain_of_thought, f"Compute Time: {compute_time:.2f} seconds"
 
     except Exception as e:
-        return "Error:
+        return f"Error: {str(e)}", "", "An error occurred. Please check your API key or the Cerebras service."
 
 # Gradio interface
 def gradio_ui():
@@ -57,19 +57,34 @@ def gradio_ui():
         compute_time = gr.Textbox(label="Compute Time", interactive=False)
         chain_of_thought_display = gr.Textbox(label="Chain of Thought", interactive=False, lines=10)
 
+        with gr.Accordion("Advanced Settings", open=False):
+            system_prompt_input = gr.Textbox(label="System Prompt", value="You are IntellijMind, an advanced AI designed to assist users with detailed insights, problem-solving, and chain-of-thought reasoning.", lines=3)
+            model_select = gr.Dropdown(label="Model", choices=["llama-3.3-70b", "llama-2-70b"], value="llama-3.3-70b")  # Add other model names as required
+            temperature_slider = gr.Slider(label="Temperature", minimum=0, maximum=2, step=0.1, value=0.2)
+            top_p_slider = gr.Slider(label="Top P", minimum=0, maximum=1, step=0.01, value=1)
+            max_tokens_slider = gr.Slider(label="Max Tokens", minimum=50, maximum=2048, step=50, value=1024)
+
+
         user_input = gr.Textbox(label="Type your message", placeholder="Ask me anything...", lines=2)
         send_button = gr.Button("Send", variant="primary")
         clear_button = gr.Button("Clear Chat")
 
-        def handle_chat(chat_history, user_input):
-
-            chat_history
-
+        def handle_chat(chat_history, user_input, system_prompt, model, temperature, top_p, max_tokens):
+            chat_history.append((user_input, None))
+            yield chat_history, "", "Thinking..."
+
+            ai_response, chain_of_thought, compute_info = chat_with_cerebras(user_input, system_prompt, model, temperature, top_p, max_tokens)
+            chat_history[-1] = (user_input, ai_response)
+            yield chat_history, chain_of_thought, compute_info
 
         def clear_chat():
             return [], "", ""
 
-        send_button.click(
+        send_button.click(
+            handle_chat,
+            inputs=[chat_history, user_input, system_prompt_input, model_select, temperature_slider, top_p_slider, max_tokens_slider],
+            outputs=[chat_history, chain_of_thought_display, compute_time]
+        )
         clear_button.click(clear_chat, outputs=[chat_history, chain_of_thought_display, compute_time])
 
         gr.Markdown("""---\n### 🌟 Features:\n- **Advanced Reasoning**: Chain-of-thought explanations for complex queries.\n- **Real-Time Performance Metrics**: Measure response compute time instantly.\n- **Insightful Chain of Thought**: See the reasoning process behind AI decisions.\n- **User-Friendly Design**: Intuitive chatbot interface with powerful features.\n- **Powered by IntellijMind**: Setting new standards for AI interaction.\n""")
@@ -78,4 +93,4 @@ def gradio_ui():
 
 # Run the Gradio app
 demo = gradio_ui()
-demo.launch()
+demo.launch()
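For reference, and not part of the commit itself, the sketch below shows the updated chat_with_cerebras signature called with the same defaults the new Advanced Settings accordion exposes. The example prompt is an assumption for illustration, and the call only succeeds with a valid CEREBRAS_API_KEY in the environment (for a Space, set via the repository secrets).

# Minimal sketch (assumes app.py's chat_with_cerebras and client are in scope)
response, chain_of_thought, compute_info = chat_with_cerebras(
    user_input="Summarize chain-of-thought prompting in two sentences.",  # illustrative prompt, not from the commit
    system_prompt="You are IntellijMind, an advanced AI designed to assist users with detailed insights, problem-solving, and chain-of-thought reasoning.",
    model="llama-3.3-70b",
    temperature=0.2,
    top_p=1,
    max_completion_tokens=1024,
)
print(compute_info)  # the "Compute Time: ..." string assembled in chat_with_cerebras

Because handle_chat is now a generator, Gradio streams its yields to the bound outputs: the first yield renders the pending user turn with a "Thinking..." status, and the second replaces it with the model response, chain of thought, and compute time.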