aiqcamp committed on
Commit 1c3714b · verified · 1 Parent(s): 0495fd2

Update app.py

Files changed (1)
  1. app.py +29 -44
app.py CHANGED
@@ -1,5 +1,3 @@
-
-
 import os
 import gradio as gr
 import random
@@ -27,6 +25,7 @@ genai.configure(api_key=GEMINI_API_KEY)
 # Gemini 2.0 Flash model to use (with the Thinking capability)
 model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
 
+
 ##############################################################################
 # Pick one of the two options separated by a slash ("/") in a transformation string
 ##############################################################################
@@ -51,10 +50,11 @@ def choose_alternative(transformation):
     else:
         return random.choice([left, right])
 
+
 ##############################################################################
-# Category dictionary (a partial excerpt is fine; only 3 kept here as examples)
+# Category dictionary
+# (The example below includes every category, but adjust the scope as needed.)
 ##############################################################################
-
 physical_transformation_categories = {
     "Spatial movement": [
         "forward/backward movement", "left/right movement", "up/down movement", "vertical-axis rotation (nodding)",
@@ -195,6 +195,8 @@ physical_transformation_categories = {
         "LiDAR sensor/detection", "touch sensor/detection", "gesture sensor/detection", "heart-rate sensor/detection", "blood-pressure sensor/detection"
     ]
 }
+
+
 ##############################################################################
 # Streaming Gemini API function:
 # - a 'Thinking' phase (the idea's internal reasoning) plus a final 'Response' phase
@@ -213,13 +215,10 @@ def query_gemini_api_stream(prompt: str) -> Iterator[str]:
     thinking_complete = False
 
     for chunk in response:
-        # Each chunk carries candidates[0].content.parts
         parts = chunk.candidates[0].content.parts
 
-        # e.g. if parts has two entries: (0: Thinking, 1: start of Response);
-        # otherwise parts may arrive one at a time
+        # Two parts means (0: Thinking, 1: start of Response)
         if len(parts) == 2 and not thinking_complete:
-            # Still thinking, but the finished Thinking and the start of the Response arrive together
             thought_buffer += parts[0].text
             yield f"[Thinking Chunk] {parts[0].text}"
 
@@ -228,27 +227,24 @@ def query_gemini_api_stream(prompt: str) -> Iterator[str]:
 
             thinking_complete = True
         elif thinking_complete:
-            # Thinking is already done, so keep streaming the Response
+            # Already in the response phase
             current_chunk = parts[0].text
             response_buffer += current_chunk
             yield current_chunk
         else:
-            # Thinking still in progress (parts arrive one at a time)
+            # Still in the thinking phase
            current_chunk = parts[0].text
             thought_buffer += current_chunk
             yield f"[Thinking Chunk] {current_chunk}"
 
-    # After streaming completes, the final result can also be delivered at once
+    # After streaming completes, deliver the final result at once
     yield f"\n[Final Response]\n{response_buffer}"
 
+
 ##############################################################################
 # Expand each category's short description into 'Thinking' + 'Response' (streaming)
 ##############################################################################
 def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[str]:
-    """
-    A streaming version of the original enhance_with_llm:
-    delivers the 'Thinking' and 'Response' phases sequentially as chunks
-    """
     prompt = f"""
     The following is a short description of '{obj_name}' for the '{category}' category:
     "{base_description}"
@@ -257,10 +253,10 @@ def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[str]:
     2) focusing on innovation points and functionality,
     please expand it into a 3-4 sentence idea.
     """
-    # Take chunks from query_gemini_api_stream() and yield them unchanged
     for chunk in query_gemini_api_stream(prompt):
         yield chunk
 
+
 ##############################################################################
 # Generate base ideas (per category) for a single keyword (object)
 ##############################################################################
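The hunks above mostly strip comments from the chunk-parsing loop, so the marker protocol is easiest to see reassembled in one place. Below is a minimal sketch that drives the same parsing logic with stubbed chunks: the SimpleNamespace objects are stand-ins, not real google.generativeai response types, and the [Response Start] branch (which sits in unchanged context this diff does not show) is assumed to mirror the branches that are shown.

```python
# Stubbed stand-ins for streamed chunks: each exposes candidates[0].content.parts,
# matching how app.py reads the stream. Not the real SDK types.
from types import SimpleNamespace
from typing import Iterator

def make_chunk(*texts: str) -> SimpleNamespace:
    parts = [SimpleNamespace(text=t) for t in texts]
    return SimpleNamespace(
        candidates=[SimpleNamespace(content=SimpleNamespace(parts=parts))]
    )

def parse_stream(chunks) -> Iterator[str]:
    thought_buffer, response_buffer = "", ""
    thinking_complete = False
    for chunk in chunks:
        parts = chunk.candidates[0].content.parts
        if len(parts) == 2 and not thinking_complete:
            # Final thinking text plus the start of the response in one chunk
            thought_buffer += parts[0].text
            yield f"[Thinking Chunk] {parts[0].text}"
            response_buffer += parts[1].text
            yield f"[Response Start] {parts[1].text}"  # assumed marker position
            thinking_complete = True
        elif thinking_complete:
            response_buffer += parts[0].text
            yield parts[0].text
        else:
            thought_buffer += parts[0].text
            yield f"[Thinking Chunk] {parts[0].text}"
    yield f"\n[Final Response]\n{response_buffer}"

if __name__ == "__main__":
    demo = [make_chunk("Weighing options..."),
            make_chunk("Settled.", "Idea: "),
            make_chunk("a folding drone.")]
    for piece in parse_stream(demo):
        print(repr(piece))
```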
@@ -302,6 +298,7 @@ def generate_three_objects_interaction(obj1, obj2, obj3):
         results[category] = {"base": base_description, "enhanced": ""}
     return results
 
+
 ##############################################################################
 # Actual transformation-generation logic
 ##############################################################################
@@ -322,12 +319,9 @@ def generate_transformations(text1, text2=None, text3=None):
 ##############################################################################
 def process_inputs_stream(text1, text2, text3) -> Iterator[list]:
     """
-    Yields [(role='assistant'|'user', content=...), ...] to fit Gradio's
-    Chatbot format, sending the Thinking phase and the final response
-    separately in real time.
+    Yields lists of the form [(role='assistant', content=...)]
+    to fit Gradio's Chatbot component.
     """
-    messages = []
-
     # 1) Validate the inputs
     yield [("assistant", "Checking inputs...")]
     time.sleep(0.3)
@@ -340,42 +334,34 @@ def process_inputs_stream(text1, text2, text3) -> Iterator[list]:
         return
 
     # 2) Generate ideas
-    yield [("assistant", "Generating creative model/concept/shape-change ideas... (per-category analysis)")]
+    yield [("assistant", "Generating creative model/concept/shape-change ideas... (per-category analysis)") ]
     time.sleep(0.3)
     results, objects = generate_transformations(text1, text2, text3)
-
-    # Stream each category
     obj_name = " and ".join([obj for obj in objects if obj])
 
+    # Stream each category
     for i, (category, result_dict) in enumerate(results.items(), start=1):
         base_desc = result_dict["base"]
-
-        # Announce the category
         yield [("assistant", f"**[{i}/{len(results)}] Category:** {category}\n\nBase idea: {base_desc}\n\nStreaming Thinking + Response step by step from here...")]
         time.sleep(0.5)
 
-        # Streaming LLM call
         thinking_text = ""
         response_text = ""
         is_thinking_done = False
 
-        # Call enhance_with_llm_stream
         for chunk in enhance_with_llm_stream(base_desc, obj_name, category):
             if chunk.startswith("[Thinking Chunk]"):
-                # Thinking part
+                # Thinking phase
                 thinking_text += chunk.replace("[Thinking Chunk]", "")
-                messages_to_user = f"**[Thinking]**\n{thinking_text}"
-                yield [("assistant", messages_to_user)]
+                yield [("assistant", f"**[Thinking]**\n{thinking_text}")]
             elif chunk.startswith("[Response Start]"):
-                # The response starts here
+                # Response begins
                 is_thinking_done = True
-                # The remainder goes into response_text
                 partial = chunk.replace("[Response Start]", "")
                 response_text += partial
-                messages_to_user = f"**[Final Response begins]**\n{partial}"
-                yield [("assistant", messages_to_user)]
+                yield [("assistant", f"**[Final Response begins]**\n{partial}")]
             elif chunk.startswith("[Final Response]"):
-                # Final end
+                # Final response
                 final = chunk.replace("[Final Response]", "")
                 response_text += f"\n{final}"
                 yield [("assistant", f"**[Final Response]**\n{response_text.strip()}")]
@@ -383,15 +369,14 @@ def process_inputs_stream(text1, text2, text3) -> Iterator[list]:
                 # Regular response streaming
                 if is_thinking_done:
                     response_text += chunk
-                    yield [("assistant", f"**[Response in progress]**\n{response_text}") ]
+                    yield [("assistant", f"**[Response in progress]**\n{response_text}")]
                 else:
                     thinking_text += chunk
                     yield [("assistant", f"**[Thinking]**\n{thinking_text}")]
 
-        # One category's response is complete
         result_dict["enhanced"] = response_text
 
-    # 3) All categories complete
+    # 3) Completion message
     yield [("assistant", "**Streaming for all categories is complete!**")]
 
 
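Worth noting about the function above: every yield sends a fresh single-element list, so the Chatbot repaints with only the latest message, and earlier categories drop out of the visible transcript. If the full history should stay on screen, a sketch of an accumulating variant (a hypothetical helper, not part of this commit):

```python
from typing import Iterator, List, Tuple

def stream_with_history(steps) -> Iterator[List[Tuple[str, str]]]:
    """Accumulate messages so each repaint shows the whole transcript."""
    history: List[Tuple[str, str]] = []
    for text in steps:
        history.append(("assistant", text))
        yield list(history)  # copy: Gradio receives the full list every time
```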
@@ -404,9 +389,10 @@ with gr.Blocks(title="Streaming example: Gemini 2.0 Flash Thinking",
     gr.Markdown("# 🚀 Keyword-based creative transformation ideas (Gemini 2.0 Flash Thinking, Streaming)")
     gr.Markdown("Enter 1-3 keywords, and 'Thinking' and 'Response' are streamed in real time **per category**.")
 
+    # Chatbot switched to type="tuples"
     chatbot = gr.Chatbot(
         label="Per-category ideas (Thinking + Response) streaming",
-        type="tuple",   # passed as a list of (role, content) pairs
+        type="tuples",  # <-- "tuples", not "tuple"
         render_markdown=True
     )
 
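On the type="tuple" to type="tuples" fix: "tuples" is the value recent Gradio releases accept, so the rename looks right, but in tuples mode the Chatbot reads each pair as (user_message, bot_message) rather than (role, content), so the literal string "assistant" lands in the user slot. The role-based shape is type="messages" with dicts. A minimal sketch of both shapes, assuming a recent Gradio version:

```python
import gradio as gr

# Pair format: each entry is (user_message, bot_message)
with gr.Blocks() as demo_tuples:
    gr.Chatbot(value=[("user text", "bot text")], type="tuples")

# Message format: each entry is a {"role": ..., "content": ...} dict
with gr.Blocks() as demo_messages:
    gr.Chatbot(value=[{"role": "assistant", "content": "bot text"}], type="messages")
```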
@@ -420,8 +406,7 @@ with gr.Blocks(title="Streaming example: Gemini 2.0 Flash Thinking",
         clear_button = gr.Button("Clear chat")
 
     with gr.Column(scale=2):
-        # The chatbot already occupies this space, so pass
-        pass
+        pass  # the chatbot is already assigned to the right-hand area
 
     def clear_chat():
         return []
@@ -434,11 +419,12 @@ with gr.Blocks(title="Streaming example: Gemini 2.0 Flash Thinking",
     ]
     gr.Examples(examples=examples, inputs=[text_input1, text_input2, text_input3])
 
+    # Streaming hookup
     submit_button.click(
         fn=process_inputs_stream,
         inputs=[text_input1, text_input2, text_input3],
         outputs=chatbot,
-        stream=True   # streaming output
+        stream=True   # stream=True for real-time streaming
     )
 
     clear_button.click(
@@ -448,4 +434,3 @@ with gr.Blocks(title="Streaming example: Gemini 2.0 Flash Thinking",
 
 if __name__ == "__main__":
     demo.launch(debug=True)
-
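One caution on the stream=True line this commit keeps: as far as I can tell, click() in recent Gradio releases has no stream parameter, and an unexpected keyword argument would raise a TypeError; streaming instead comes from the handler being a generator, which process_inputs_stream already is. A minimal sketch of the generator-driven pattern under that assumption (the component names here are hypothetical):

```python
import time
import gradio as gr

def tick(n):
    # A generator handler: each yield repaints the Chatbot, no stream kwarg needed
    history = []
    for i in range(int(n)):
        history.append(("tick", f"step {i + 1}"))
        time.sleep(0.2)
        yield history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="tuples")
    steps = gr.Number(value=3, label="steps")
    run = gr.Button("Run")
    run.click(fn=tick, inputs=steps, outputs=chatbot)  # streams via the generator

if __name__ == "__main__":
    demo.launch()
```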