Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -1,5 +1,3 @@
-
-
 import os
 import gradio as gr
 import random
@@ -27,6 +25,7 @@ genai.configure(api_key=GEMINI_API_KEY)
 # Gemini 2.0 Flash model to use (includes the Thinking feature)
 model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
 
+
 ##############################################################################
 # Pick one of the two options separated by a slash ("/") in a transformation string
 ##############################################################################
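As context for the hunk above: the snippet below is a minimal sketch of how this model object is driven with streaming enabled. The `google-generativeai` calls are the library's documented API; reading the key from a `GEMINI_API_KEY` environment variable and the sample prompt are assumptions.

```python
import os
import google.generativeai as genai

# Mirrors the configuration in the diff; the env-var lookup is an assumption.
genai.configure(api_key=os.environ["GEMINI_API_KEY"])
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")

# stream=True returns an iterator of partial chunks instead of one response.
for chunk in model.generate_content("Describe a folding bicycle.", stream=True):
    parts = chunk.candidates[0].content.parts  # same access pattern as the diff
    print(parts[0].text, end="", flush=True)
```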
@@ -51,10 +50,11 @@ def choose_alternative(transformation):
     else:
         return random.choice([left, right])
 
+
 ##############################################################################
-# Category dictionary
+# Category dictionary
+# (The original example included every category; adjust the scope as needed.)
 ##############################################################################
-
 physical_transformation_categories = {
     "Spatial movement": [
         "forward/backward movement", "left/right movement", "up/down movement", "vertical-axis rotation (head nodding)",
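Only the tail of `choose_alternative` is visible in this hunk. A hypothetical reconstruction, consistent with the banner comment above it (split on "/" and pick one side), might look like this; the guard for strings without a slash is an assumption:

```python
import random

def choose_alternative(transformation: str) -> str:
    """Hypothetical reconstruction: only the trailing else-branch appears in the diff."""
    if "/" not in transformation:  # assumed guard for plain strings
        return transformation
    left, right = (s.strip() for s in transformation.split("/", 1))
    # The visible tail falls through to an even random choice between the halves.
    return random.choice([left, right])
```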
@@ -195,6 +195,8 @@ physical_transformation_categories = {
         "LiDAR sensing/detection", "touch sensing/detection", "gesture sensing/detection", "heart-rate sensing/detection", "rotation sensing/detection"
     ]
 }
+
+
 ##############################################################################
 # Streaming Gemini API function:
 # - Composed of a 'Thinking' phase (internal idea reasoning) and a final 'Response' phase
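The dictionary's shape (category name mapped to a list of slash-separated transformations) suggests the consumption pattern sketched below. `sample_idea` is a hypothetical helper, reusing the `choose_alternative` sketch above:

```python
import random

def sample_idea(categories: dict) -> dict:
    """Hypothetical: pick one transformation per category, resolving 'a/b' options."""
    return {
        category: choose_alternative(random.choice(options))
        for category, options in categories.items()
    }

# e.g. sample_idea(physical_transformation_categories)
# -> {"Spatial movement": "left/right movement", ...}
```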
@@ -213,13 +215,10 @@ def query_gemini_api_stream(prompt: str) -> Iterator[str]:
     thinking_complete = False
 
     for chunk in response:
-        # Each chunk carries candidates[0].content.parts
         parts = chunk.candidates[0].content.parts
 
-        #
-        # Otherwise the parts can arrive split up, one at a time
+        # If parts has two entries (0: Thinking, 1: start of the Response)
         if len(parts) == 2 and not thinking_complete:
-            # Still Thinking, but the finished Thinking plus the start of the Response came in at once
             thought_buffer += parts[0].text
             yield f"[Thinking Chunk] {parts[0].text}"
 
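This hunk ends just before the code that handles `parts[1]`; the lines that emit the `[Response Start]` marker consumed further down sit in the unchanged region the diff omits. A plausible reconstruction of the whole loop, assuming the module-level `model` object and that `parts[1]` is the start of the response:

```python
from typing import Iterator

def query_gemini_api_stream(prompt: str) -> Iterator[str]:
    """Plausible full loop; the two lines marked 'elided' are not shown in the diff."""
    response = model.generate_content(prompt, stream=True)
    thought_buffer, response_buffer = "", ""
    thinking_complete = False

    for chunk in response:
        parts = chunk.candidates[0].content.parts
        if len(parts) == 2 and not thinking_complete:
            # Thinking finished and the response began within a single chunk.
            thought_buffer += parts[0].text
            yield f"[Thinking Chunk] {parts[0].text}"
            response_buffer += parts[1].text            # elided in the diff (assumption)
            yield f"[Response Start] {parts[1].text}"   # elided in the diff (assumption)
            thinking_complete = True
        elif thinking_complete:
            response_buffer += parts[0].text
            yield parts[0].text
        else:
            thought_buffer += parts[0].text
            yield f"[Thinking Chunk] {parts[0].text}"

    yield f"\n[Final Response]\n{response_buffer}"
```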
@@ -228,27 +227,24 @@ def query_gemini_api_stream(prompt: str) -> Iterator[str]:
 
             thinking_complete = True
         elif thinking_complete:
-            #
+            # Already in response phase
             current_chunk = parts[0].text
             response_buffer += current_chunk
             yield current_chunk
         else:
-            #
+            # Still in thinking phase
             current_chunk = parts[0].text
             thought_buffer += current_chunk
             yield f"[Thinking Chunk] {current_chunk}"
 
-    # After streaming completes, the final result in one go
+    # After streaming completes, deliver the final result in one go
     yield f"\n[Final Response]\n{response_buffer}"
 
+
 ##############################################################################
 # Expand each category's short description into 'Thinking' + 'Response' (streaming)
 ##############################################################################
 def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[str]:
-    """
-    A streaming version of the existing enhance_with_llm:
-    delivers the 'Thinking' + 'Response' phases sequentially, chunk by chunk
-    """
     prompt = f"""
 The following is a short description of '{obj_name}' related to '{category}':
 "{base_description}"
@@ -257,10 +253,10 @@ def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[str]:
 2) Focusing on innovation points, functionality, and the like,
 please expand it into a 3-4 sentence idea.
 """
-    # Take the chunks from query_gemini_api_stream() and yield them as-is
     for chunk in query_gemini_api_stream(prompt):
         yield chunk
 
+
 ##############################################################################
 # Generate base ideas (per category) for a single keyword (object)
 ##############################################################################
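A short usage sketch for the generator above; the keyword, category, and base description are invented example values:

```python
# Illustrative only: drive the streaming generator from plain Python.
for chunk in enhance_with_llm_stream(
    base_description="an umbrella that rotates about its vertical axis",
    obj_name="umbrella",
    category="Spatial movement",
):
    print(chunk, end="", flush=True)
```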
@@ -302,6 +298,7 @@ def generate_three_objects_interaction(obj1, obj2, obj3):
         results[category] = {"base": base_description, "enhanced": ""}
     return results
 
+
 ##############################################################################
 # The actual transformation-generation logic
 ##############################################################################
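For reference, the `results` mapping built by these `generate_*` functions pairs every category with a "base" idea and an empty "enhanced" slot that the streaming pass fills in later; the example values below are invented:

```python
# Shape of the `results` mapping (example values invented):
results = {
    "Spatial movement": {
        "base": "umbrella - forward/backward movement",
        "enhanced": "",  # filled in later by the streaming pass
    },
    # ... one entry per category ...
}
```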
@@ -322,12 +319,9 @@ def generate_transformations(text1, text2=None, text3=None):
 ##############################################################################
 def process_inputs_stream(text1, text2, text3) -> Iterator[list]:
     """
-    For Gradio's Chatbot, yield
-    [(role='assistant', ...)]-style lists,
-    sending the Thinking phase and the final response separately in real time.
+    To match Gradio's Chatbot component,
+    yield lists of the form [(role='assistant', content=...)].
     """
-    messages = []
-
     # 1) Check the inputs
     yield [("assistant", "Checking inputs...")]
     time.sleep(0.3)
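Gradio streams to a Chatbot by wiring a generator to an event: each yielded value replaces the component's current contents. A self-contained sketch of that mechanism with a fake generator (no API calls; all names invented):

```python
import time
import gradio as gr

def fake_stream(keyword):
    # Each yield replaces the Chatbot's history, producing the streaming effect.
    for text in ["Thinking...", "Thinking... done.", f"Final idea for {keyword}."]:
        time.sleep(0.2)
        yield [(keyword, text)]  # tuples format: (user message, bot message)

with gr.Blocks() as sketch:
    box = gr.Textbox(label="keyword")
    chat = gr.Chatbot(type="tuples")
    gr.Button("Go").click(fn=fake_stream, inputs=box, outputs=chat)

# sketch.launch()
```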
@@ -340,42 +334,34 @@ def process_inputs_stream(text1, text2, text3) -> Iterator[list]:
         return
 
     # 2) Generate the ideas
-    yield [("assistant", "Generating creative model/concept/shape-change ideas... (per-category analysis)")]
+    yield [("assistant", "Generating creative model/concept/shape-change ideas... (per-category analysis)")]
     time.sleep(0.3)
     results, objects = generate_transformations(text1, text2, text3)
-
-    # Per-category streaming
     obj_name = " and ".join([obj for obj in objects if obj])
 
+    # Per-category streaming
     for i, (category, result_dict) in enumerate(results.items(), start=1):
         base_desc = result_dict["base"]
-
-        # Announce the category
         yield [("assistant", f"**[{i}/{len(results)}] Category:** {category}\n\nBase idea: {base_desc}\n\nStreaming Thinking + Response step by step from here on...")]
         time.sleep(0.5)
 
-        # Streaming LLM call
         thinking_text = ""
         response_text = ""
         is_thinking_done = False
 
-        # Call enhance_with_llm_stream
         for chunk in enhance_with_llm_stream(base_desc, obj_name, category):
             if chunk.startswith("[Thinking Chunk]"):
-                # Thinking
+                # Thinking phase
                 thinking_text += chunk.replace("[Thinking Chunk]", "")
-
-                yield [("assistant", messages_to_user)]
+                yield [("assistant", f"**[Thinking]**\n{thinking_text}")]
             elif chunk.startswith("[Response Start]"):
-                # The response begins
+                # The response begins
                 is_thinking_done = True
-                # Carry the remaining part over into response_text
                 partial = chunk.replace("[Response Start]", "")
                 response_text += partial
-
-                yield [("assistant", messages_to_user)]
+                yield [("assistant", f"**[Final Response start]**\n{partial}")]
             elif chunk.startswith("[Final Response]"):
-                # Final
+                # Final response
                 final = chunk.replace("[Final Response]", "")
                 response_text += f"\n{final}"
                 yield [("assistant", f"**[Final Response]**\n{response_text.strip()}")]
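The dispatch above keys everything off string prefixes. A tiny offline simulation with invented chunks shows how the three markers partition a stream:

```python
# Offline simulation of the marker protocol, no API calls; chunks are invented.
fake_chunks = [
    "[Thinking Chunk] weighing hinge materials...",
    "[Response Start] A folding frame",
    " with a magnetic hinge.",
    "[Final Response]\nA folding frame with a magnetic hinge.",
]

thinking, response = "", ""
in_response = False
for chunk in fake_chunks:
    if chunk.startswith("[Thinking Chunk]"):
        thinking += chunk.replace("[Thinking Chunk]", "")
    elif chunk.startswith("[Response Start]"):
        in_response = True
        response += chunk.replace("[Response Start]", "")
    elif chunk.startswith("[Final Response]"):
        # The producer re-sends the whole buffer, so treat it as authoritative.
        response = chunk.replace("[Final Response]", "").strip()
    elif in_response:
        response += chunk

print(f"THINKING:{thinking}\nRESPONSE: {response}")
```

Note that the producer shown earlier yields the final marker as `f"\n[Final Response]\n..."` with a leading newline, so the app's `startswith` check only matches if leading whitespace is stripped first; the simulation assumes the marker starts the chunk.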
@@ -383,15 +369,14 @@ def process_inputs_stream(text1, text2, text3) -> Iterator[list]:
                 # Generic response streaming
                 if is_thinking_done:
                     response_text += chunk
-                    yield [("assistant", f"**[Response in progress]**\n{response_text}")
+                    yield [("assistant", f"**[Response in progress]**\n{response_text}")]
                 else:
                     thinking_text += chunk
                     yield [("assistant", f"**[Thinking]**\n{thinking_text}")]
 
-        # One category's response is complete
         result_dict["enhanced"] = response_text
 
-    # 3)
+    # 3) Completion message
     yield [("assistant", "**Streaming for all categories is complete!**")]
 
 
@@ -404,9 +389,10 @@ with gr.Blocks(title="Streaming example: Gemini 2.0 Flash Thinking",
     gr.Markdown("# 🚀 Keyword-based creative transformation ideas (Gemini 2.0 Flash Thinking, Streaming)")
     gr.Markdown("Enter one to three keywords and 'Thinking' and 'Response' will stream in real time, **per category**.")
 
+    # In the Chatbot, fixed to type="tuples"
     chatbot = gr.Chatbot(
         label="Per-category ideas (Thinking + Response) streaming",
-        type="tuple",
+        type="tuples",  # <-- fixed: "tuples", not "tuple"
         render_markdown=True
     )
 
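For reference, `gr.Chatbot`'s `type` parameter selects the history format: `"tuples"` expects (user, assistant) pairs, while `"messages"` expects role/content dicts. A sketch of both (contents invented):

```python
import gradio as gr

# type="tuples": history is a list of (user_message, assistant_message) pairs.
chat_tuples = gr.Chatbot(type="tuples", value=[("hello", "hi there")])

# type="messages": history is a list of role/content dicts.
chat_messages = gr.Chatbot(
    type="messages",
    value=[
        {"role": "user", "content": "hello"},
        {"role": "assistant", "content": "hi there"},
    ],
)
```

Under the tuples format the first element of each pair renders on the user side, so the `("assistant", ...)` tuples yielded above display the literal word "assistant" as the user message; `type="messages"` is the role-aware alternative.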
@@ -420,8 +406,7 @@ with gr.Blocks(title="Streaming example: Gemini 2.0 Flash Thinking",
         clear_button = gr.Button("Clear chat")
 
     with gr.Column(scale=2):
-        # The chatbot is already
-        pass
+        pass  # the chatbot is already assigned to the right-hand area
 
     def clear_chat():
         return []
@@ -434,11 +419,12 @@ with gr.Blocks(title="Streaming example: Gemini 2.0 Flash Thinking",
     ]
     gr.Examples(examples=examples, inputs=[text_input1, text_input2, text_input3])
 
+    # Streaming hookup
     submit_button.click(
         fn=process_inputs_stream,
         inputs=[text_input1, text_input2, text_input3],
         outputs=chatbot,
-        stream=True  #
+        stream=True  # stream=True for real-time streaming
     )
 
     clear_button.click(
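One caveat, hedged: recent Gradio releases stream generator outputs automatically, and `stream` is not a documented `.click()` keyword, so this argument may raise a `TypeError` depending on the installed version. The equivalent wiring without the flag would be:

```python
# Relies on Gradio's native streaming of generator functions; no extra flag.
submit_button.click(
    fn=process_inputs_stream,
    inputs=[text_input1, text_input2, text_input3],
    outputs=chatbot,
)
```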
@@ -448,4 +434,3 @@ with gr.Blocks(title="Streaming example: Gemini 2.0 Flash Thinking",
 
 if __name__ == "__main__":
     demo.launch(debug=True)
-