Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -20,7 +20,7 @@ Follow these steps:
 [Reason]: Execute plan with detailed analysis
 [Verify]: Check logic and evidence
 [Conclude]: Present structured conclusion
-You are allowed to use code to think amd answer better
+You are allowed to use code to think amd answer better.
 
 """
 
@@ -59,8 +59,9 @@ def initialize_model():
 def format_response(text):
     return text.replace("[Understand]", '\n<strong class="special-tag">[Understand]</strong>\n') \
         .replace("[Plan]", '\n<strong class="special-tag">[Plan]</strong>\n') \
-        .replace("[Conclude]", '\n<strong class="special-tag">[Conclude]</strong>\n')
-
+        .replace("[Conclude]", '\n<strong class="special-tag">[Conclude]</strong>\n') \
+        .replace("[Reason]", '\n<strong class="special-tag">[Reason]</strong>\n') \
+        .replace("[Verify]", '\n<strong class="special-tag">[Verify]</strong>\n')
 @spaces.GPU
 def generate_response(message, chat_history, system_prompt, temperature, max_tokens):
     # Create conversation history for model
@@ -120,7 +121,7 @@ with gr.Blocks(css=CSS, theme=gr.themes.Soft()) as demo:
 
     with gr.Accordion("⚙️ Settings", open=False):
         system_prompt = gr.TextArea(value=DEFAULT_SYSTEM_PROMPT, label="System Instructions")
-        temperature = gr.Slider(0, 1, value=0.
+        temperature = gr.Slider(0, 1, value=0.5, label="Creativity")
         max_tokens = gr.Slider(128, 4096, value=2048, label="Max Response Length")
 
     clear = gr.Button("Clear History")
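
The format_response hunk simply extends the chained .replace so [Reason] and [Verify] get the same styling as the other tags. A minimal equivalent sketch, assuming only the five tags and the <strong class="special-tag"> markup shown in the diff, that keeps the tag list in one place:

# Sketch only: behaviorally equivalent to the chained .replace in app.py,
# assuming the five tags and the markup shown in the diff above.
SPECIAL_TAGS = ["[Understand]", "[Plan]", "[Reason]", "[Verify]", "[Conclude]"]

def format_response(text):
    # Wrap each reasoning tag so the chat UI renders it as a styled label.
    for tag in SPECIAL_TAGS:
        text = text.replace(tag, f'\n<strong class="special-tag">{tag}</strong>\n')
    return text

Keeping the tags in a single list means the next new step added to the system prompt only needs one extra entry here.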
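The settings hunk only gives the temperature slider a default of 0.5 and a "Creativity" label; the event wiring that passes the slider values into generation is not part of this diff. A hypothetical, self-contained sketch of that wiring, reusing the component names from the diff and a stand-in handler in place of the real @spaces.GPU generate_response:

import gradio as gr

def generate_response(message, chat_history, system_prompt, temperature, max_tokens):
    # Stand-in: the real app runs the model here with these sampling settings.
    reply = f"(temperature={temperature}, max_tokens={max_tokens}) {message}"
    return chat_history + [(message, reply)], ""

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()                      # assumed; not shown in this diff
    msg = gr.Textbox(label="Message")           # assumed; not shown in this diff
    with gr.Accordion("⚙️ Settings", open=False):
        system_prompt = gr.TextArea(label="System Instructions")
        temperature = gr.Slider(0, 1, value=0.5, label="Creativity")
        max_tokens = gr.Slider(128, 4096, value=2048, label="Max Response Length")
    clear = gr.Button("Clear History")

    # Sliders are passed by value: the handler receives a float and an int.
    msg.submit(generate_response,
               inputs=[msg, chatbot, system_prompt, temperature, max_tokens],
               outputs=[chatbot, msg])
    clear.click(lambda: [], outputs=chatbot)

demo.launch()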