Add editor behavior toggle
app.py CHANGED
@@ -119,12 +119,14 @@ class ConvoState:
         self.convo = []
         self.initialize_new_round()
         self.is_error = False
+        self.result_editing_toggle = False

     def initialize_new_round(self):
         self.current = {}
         self.current["user"] = ""
         self.current["cot"] = ""
         self.current["result"] = ""
+        self.current["raw"] = ""
         self.convo.append(self.current)

     def flatten_output(self):
@@ -147,6 +149,7 @@ class ConvoState:
         lang_data = LANGUAGE_CONFIG[self.current_language]
         dynamic_state.stream_completed = False
         full_response = current_content
+        self.current["raw"] = full_response
         api_client = OpenAI(
             api_key=os.getenv("API_KEY"),
             base_url=os.getenv("API_URL"),
@@ -154,6 +157,8 @@ class ConvoState:
         )
         coordinator = CoordinationManager(self.sync_threshold, current_content)

+        editor_output = current_content
+
         try:

             # 初始等待状态更新
@@ -192,6 +197,7 @@ class ConvoState:
                 if chunk_content:
                     dynamic_state.waiting_api = False
                     full_response += chunk_content
+                    self.current["raw"] = full_response
                     # Update Convo State
                     think_complete = "</think>" in full_response
                     dynamic_state.in_cot = not think_complete
@@ -210,7 +216,11 @@ class ConvoState:
                         else lang_data["loading_output"]
                     )
                     editor_label = f"{lang_data['editor_label']} - {status}"
-
+                    if self.result_editing_toggle:
+                        editor_output = full_response
+                    else:
+                        editor_output = self.current["cot"] + ("</think>" if think_complete else "")
+                    yield editor_output, gr.update(
                         label=editor_label
                     ), self.flatten_output()

@@ -246,9 +256,9 @@ class ConvoState:
             )
             editor_label = f"{lang_data['editor_label']} - {final_status}"
             if not self.is_error:
-                yield
+                yield editor_output, gr.update(label=editor_label), self.flatten_output()
             else:
-                yield
+                yield editor_output, gr.update(label=editor_label_error), self.flatten_output() + [
                     {
                         "role": "assistant",
                         "content": error_msg,
@@ -298,6 +308,7 @@ def update_interface_language(selected_lang, convo_state, dynamic_state):
         ),
         gr.update(value=lang_data["introduction"]),
         gr.update(value=lang_data["bot_default"], label=lang_data["bot_label"]),
+        gr.update(label=lang_data["result_editing_toggle"]),
     ]


@@ -330,18 +341,19 @@ with gr.Blocks(theme=theme, css_paths="styles.css") as demo:
             container=False,
         )

-    with gr.Row(equal_height=True):

+    with gr.Row(equal_height=True):
         with gr.Column(scale=1, min_width=400):
             prompt_input = gr.Textbox(
                 label=LANGUAGE_CONFIG["en"]["prompt_label"],
                 lines=2,
                 placeholder=LANGUAGE_CONFIG["en"]["prompt_placeholder"],
-                max_lines=
+                max_lines=2,
             )
             thought_editor = gr.Textbox(
                 label=f"{LANGUAGE_CONFIG['en']['editor_label']} - {LANGUAGE_CONFIG['en']['editor_default']}",
                 lines=16,
+                max_lines=16,
                 placeholder=LANGUAGE_CONFIG["en"]["editor_placeholder"],
                 autofocus=True,
                 elem_id="editor",
@@ -381,6 +393,7 @@ with gr.Blocks(theme=theme, css_paths="styles.css") as demo:
             label=LANGUAGE_CONFIG["en"]["throughput_label"],
             info=LANGUAGE_CONFIG["en"]["throughput_info"],
         )
+        result_editing_toggle = gr.Checkbox(label=LANGUAGE_CONFIG["en"]["result_editing_toggle"], interactive=True, scale=0, container=False)

     intro_md = gr.Markdown(LANGUAGE_CONFIG["en"]["introduction"], visible=False)

@@ -413,7 +426,6 @@ with gr.Blocks(theme=theme, css_paths="styles.css") as demo:
         for response in convo_state.generate_ai_response(
             prompt, content, dynamic_state
         ):
-            print(response)
             yield response + ({"prompt_input": convo_state.current["user"], "thought_editor": convo_state.current["cot"]},)

     gr.on(
@@ -444,6 +456,23 @@ with gr.Blocks(theme=theme, css_paths="styles.css") as demo:
         show_progress=False
     )

+    def toggle_editor_result(convo_state, allow):
+        setattr(convo_state, "result_editing_toggle", allow)
+        if allow:
+            print(convo_state.current["raw"])
+            return gr.update(value=convo_state.current["raw"])
+        else:
+            print(convo_state.current["cot"])
+            return gr.update(value=convo_state.current["cot"])
+
+
+
+    result_editing_toggle.change(
+        toggle_editor_result,
+        inputs=[convo_state, result_editing_toggle],
+        outputs=[thought_editor]
+    )
+
     lang_selector.change(
         lambda lang, s, d: update_interface_language(lang, s, d),
         [lang_selector, convo_state, dynamic_state],
@@ -458,6 +487,7 @@ with gr.Blocks(theme=theme, css_paths="styles.css") as demo:
             next_turn_btn,
             intro_md,
             chatbot,
+            result_editing_toggle
         ],
         concurrency_limit=None,
     )
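For context, here is a minimal, self-contained sketch of the wiring this commit adds to app.py (a hypothetical standalone demo, not the Space's actual code): a gr.Checkbox whose .change event swaps the shared editor Textbox between the stored chain-of-thought ("cot") and the full raw stream ("raw"). The plain `state` dict and the labels below are stand-ins for ConvoState and LANGUAGE_CONFIG.

# Sketch of the toggle pattern (assumption: simplified stand-in state).
import gradio as gr

# Two views of one generation: chain-of-thought only, and the raw stream
# that also contains the final result after "</think>".
state = {"cot": "step 1... step 2...", "raw": "step 1... step 2...</think>final answer"}

def toggle_editor_result(allow):
    # Checked: show the raw stream (thought + result) in the editor.
    # Unchecked: fall back to the chain-of-thought portion only.
    return gr.update(value=state["raw"] if allow else state["cot"])

with gr.Blocks() as demo:
    thought_editor = gr.Textbox(value=state["cot"], lines=16, label="Thought editor")
    result_editing_toggle = gr.Checkbox(label="Editor includes Result")
    result_editing_toggle.change(
        toggle_editor_result,
        inputs=[result_editing_toggle],
        outputs=[thought_editor],
    )

if __name__ == "__main__":
    demo.launch()

During streaming, generate_ai_response now makes the same choice per chunk: it yields full_response when result_editing_toggle is set, otherwise self.current["cot"] plus a closing "</think>" once thinking is complete.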
lang.py CHANGED
@@ -35,7 +35,8 @@ LANGUAGE_CONFIG = {
         "editor_default": "AI thought will start with this, leave blank to think freely",
         "waiting_api": "⏳ Waiting for API response",
         "api_retry": "🔁 API no response, hit Shift+Enter to try again.",
-        "api_interrupted": "⚠️ Pasued, API connection interrupted. Hit Shift+Enter to reconnect"
+        "api_interrupted": "⚠️ Pasued, API connection interrupted. Hit Shift+Enter to reconnect",
+        "result_editing_toggle": "Editor includes Result"

     },
     "zh": {
@@ -74,6 +75,8 @@ LANGUAGE_CONFIG = {
         "editor_default": "AI思维会以此开头,留空即为默认思考",
         "waiting_api": "⏳ 等待API响应",
         "api_retry": "🔁 API无响应, Shift+Enter 重试一次试试?",
-        "api_interrupted": "⚠️ 暂停,API连接意外中断,Shift+Enter 可重连"
+        "api_interrupted": "⚠️ 暂停,API连接意外中断,Shift+Enter 可重连",
+        "result_editing_toggle": "编辑器包括最终答案"
+
     },
 }
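The new "result_editing_toggle" keys feed the relabelling path in app.py's update_interface_language, which returns one gr.update(label=...) per localized component; the checkbox is simply appended to that output list. A minimal sketch of that pattern (hypothetical standalone demo reusing only the two keys added here):

import gradio as gr

# Only the keys added in this commit; the real LANGUAGE_CONFIG has many more.
LANGUAGE_CONFIG = {
    "en": {"result_editing_toggle": "Editor includes Result"},
    "zh": {"result_editing_toggle": "编辑器包括最终答案"},
}

def update_interface_language(selected_lang):
    # Return one update per localized component; here, just the checkbox label.
    lang_data = LANGUAGE_CONFIG[selected_lang]
    return gr.update(label=lang_data["result_editing_toggle"])

with gr.Blocks() as demo:
    lang_selector = gr.Radio(["en", "zh"], value="en", label="Language")
    result_editing_toggle = gr.Checkbox(label=LANGUAGE_CONFIG["en"]["result_editing_toggle"])
    lang_selector.change(update_interface_language, [lang_selector], [result_editing_toggle])

if __name__ == "__main__":
    demo.launch()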