aiqcamp committed
Commit 9be8a74 · verified · 1 Parent(s): 1c3714b

Update app.py

Files changed (1):
  1. app.py +65 -77
app.py CHANGED
@@ -1,3 +1,4 @@
+
 import os
 import gradio as gr
 import random
@@ -6,7 +7,6 @@ import logging
 from typing import Iterator
 
 import google.generativeai as genai
-from gradio import ChatMessage  # uses the ChatMessage structure (can separate Thinking and Response)
 
 logging.basicConfig(
     level=logging.INFO,
@@ -18,17 +18,14 @@ logging.basicConfig(
 )
 logger = logging.getLogger("idea_generator")
 
-# Gemini API key setup
+# ====== Gemini API setup ======
 GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
 genai.configure(api_key=GEMINI_API_KEY)
 
-# the Gemini 2.0 Flash model to use (includes the Thinking feature)
+# ====== Gemini model to use ======
 model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
 
-
-##############################################################################
-# Pick one of the two options separated by a slash ("/") in a transformation string
-##############################################################################
+# Pick a single option from a transformation string separated by a slash ("/")
 def choose_alternative(transformation):
     if "/" not in transformation:
         return transformation
@@ -51,6 +48,7 @@ def choose_alternative(transformation):
     return random.choice([left, right])
 
 
+
 ##############################################################################
 # Category dictionary
 # (the examples below include every category; adjust the scope as needed)
@@ -197,16 +195,14 @@ physical_transformation_categories = {
 }
 
 
-##############################################################################
-# Streaming Gemini API function:
-# - a 'Thinking' phase (internal idea reasoning) followed by a final 'Response' phase
-##############################################################################
+
+# ====== Gemini streaming API function ======
 def query_gemini_api_stream(prompt: str) -> Iterator[str]:
     """
-    Streams Gemini 2.0 Flash output as chunks, separating the
-    'Thinking' part from the 'Response' part.
+    Receives Thinking (the reasoning process) + Response (the final answer)
+    from the Gemini 2.0 Flash model with stream=True, yielding chunk by chunk.
     """
-    # initialize the chat (a one-off call with no history)
+    # a simple call with no conversation history
     chat = model.start_chat(history=[])
     response = chat.send_message(prompt, stream=True)
 
@@ -217,33 +213,32 @@ def query_gemini_api_stream(prompt: str) -> Iterator[str]:
     for chunk in response:
         parts = chunk.candidates[0].content.parts
 
-        # two parts means (0: Thinking, 1: start of the Response)
         if len(parts) == 2 and not thinking_complete:
+            # first part: Thinking
             thought_buffer += parts[0].text
             yield f"[Thinking Chunk] {parts[0].text}"
-
+
+            # second part: Response
             response_buffer = parts[1].text
             yield f"[Response Start] {parts[1].text}"
-
+
             thinking_complete = True
         elif thinking_complete:
-            # Already in response phase
+            # already in the response phase
             current_chunk = parts[0].text
            response_buffer += current_chunk
             yield current_chunk
         else:
-            # Still in thinking phase
+            # still in the Thinking phase
             current_chunk = parts[0].text
             thought_buffer += current_chunk
             yield f"[Thinking Chunk] {current_chunk}"
 
-    # after streaming completes, emit the full final result at once
+    # finally, the complete final response
     yield f"\n[Final Response]\n{response_buffer}"
 
 
-##############################################################################
-# Expand each category's short description into 'Thinking' + 'Response' (streaming)
-##############################################################################
+# ====== Description-expansion function (streaming) ======
 def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[str]:
     prompt = f"""
 The following is a short description of '{obj_name}' related to '{category}':
@@ -257,9 +252,7 @@ def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[str]:
         yield chunk
 
 
-##############################################################################
-# Generate base ideas (per category) for a single keyword (object)
-##############################################################################
+# ====== Idea generation for a single keyword (object) ======
 def generate_single_object_transformations(obj):
     results = {}
     for category, transformations in physical_transformation_categories.items():
@@ -268,9 +261,7 @@ def generate_two_objects_interaction(obj1, obj2):
         results[category] = {"base": base_description, "enhanced": ""}
     return results
 
-##############################################################################
-# Interaction between two keywords
-##############################################################################
+# ====== Interaction between two objects ======
 def generate_two_objects_interaction(obj1, obj2):
     results = {}
     for category, transformations in physical_transformation_categories.items():
@@ -283,9 +274,7 @@ def generate_two_objects_interaction(obj1, obj2):
         results[category] = {"base": base_description, "enhanced": ""}
     return results
 
-##############################################################################
-# Interaction among three keywords
-##############################################################################
+# ====== Interaction among three objects ======
 def generate_three_objects_interaction(obj1, obj2, obj3):
     results = {}
     for category, transformations in physical_transformation_categories.items():
@@ -299,9 +288,6 @@ def generate_three_objects_interaction(obj1, obj2, obj3):
     return results
 
 
-##############################################################################
-# The actual transformation-generation logic
-##############################################################################
 def generate_transformations(text1, text2=None, text3=None):
     if text2 and text3:
         results = generate_three_objects_interaction(text1, text2, text3)
@@ -314,85 +300,88 @@
     objects = [text1]
     return results, objects
 
-##############################################################################
-# Streaming: deliver the 'Thinking' + 'Response' parts per category in real time
-##############################################################################
+
+# ====== Stream through each category, showing Thinking + Response ======
 def process_inputs_stream(text1, text2, text3) -> Iterator[list]:
     """
-    Yields lists shaped like [(role='assistant', content=...)]
-    to match Gradio's Chatbot component.
+    Real-time updates via stream=True are only possible on Gradio 3.27+.
+    Messages are returned as [{'role': 'assistant', 'content': ...}, ...].
     """
-    # 1) check the inputs
-    yield [("assistant", "Checking inputs...")]
+    # 1) validate the inputs
+    yield [{"role": "assistant", "content": "Checking inputs..."}]
     time.sleep(0.3)
 
     text1 = text1.strip() if text1 else None
     text2 = text2.strip() if text2 else None
     text3 = text3.strip() if text3 else None
     if not text1:
-        yield [("assistant", "Error: please enter at least one keyword.")]
+        yield [{"role": "assistant", "content": "Error: please enter at least one keyword."}]
        return
 
     # 2) generate the ideas
-    yield [("assistant", "Generating creative model/concept/shape-change ideas... (per-category analysis)")]
+    yield [{"role": "assistant", "content": "Generating creative model/concept/shape-change ideas... (per-category analysis)"}]
     time.sleep(0.3)
     results, objects = generate_transformations(text1, text2, text3)
+
     obj_name = " and ".join([obj for obj in objects if obj])
 
-    # per-category streaming processing
-    for i, (category, result_dict) in enumerate(results.items(), start=1):
-        base_desc = result_dict["base"]
-        yield [("assistant", f"**[{i}/{len(results)}] Category:** {category}\n\nBase idea: {base_desc}\n\nStreaming Thinking + Response step by step from here on...")]
+    # per-category streaming
+    for i, (category, info) in enumerate(results.items(), start=1):
+        base_desc = info["base"]
+        yield [{
+            "role": "assistant",
+            "content": f"**[{i}/{len(results)}] Category:** {category}\n\nBase idea: {base_desc}\n\nStarting Thinking + Response streaming..."
+        }]
         time.sleep(0.5)
 
         thinking_text = ""
         response_text = ""
-        is_thinking_done = False
+        thinking_done = False
 
         for chunk in enhance_with_llm_stream(base_desc, obj_name, category):
             if chunk.startswith("[Thinking Chunk]"):
-                # thinking stage
+                # Thinking phase
                 thinking_text += chunk.replace("[Thinking Chunk]", "")
-                yield [("assistant", f"**[Thinking]**\n{thinking_text}")]
+                yield [{"role": "assistant", "content": f"**[Thinking]**\n{thinking_text}"}]
+
             elif chunk.startswith("[Response Start]"):
-                # start of the response
-                is_thinking_done = True
+                # the Response begins
+                thinking_done = True
                 partial = chunk.replace("[Response Start]", "")
                 response_text += partial
-                yield [("assistant", f"**[Final Response start]**\n{partial}")]
+                yield [{"role": "assistant", "content": f"**[Response start]**\n{partial}"}]
+
             elif chunk.startswith("[Final Response]"):
-                # final response
+                # the final answer
                 final = chunk.replace("[Final Response]", "")
                 response_text += f"\n{final}"
-                yield [("assistant", f"**[Final Response]**\n{response_text.strip()}")]
+                yield [{"role": "assistant", "content": f"**[Final Response]**\n{response_text.strip()}"}]
+
            else:
                 # ordinary response streaming
-                if is_thinking_done:
+                if thinking_done:
                     response_text += chunk
-                    yield [("assistant", f"**[Response in progress]**\n{response_text}")]
+                    yield [{"role": "assistant", "content": f"**[Response in progress]**\n{response_text}"}]
                 else:
                     thinking_text += chunk
-                    yield [("assistant", f"**[Thinking]**\n{thinking_text}")]
 
-        result_dict["enhanced"] = response_text
+                    yield [{"role": "assistant", "content": f"**[Thinking]**\n{thinking_text}"}]
 
-    # 3) completion message
-    yield [("assistant", "**Streaming for all categories is complete!**")]
+        info["enhanced"] = response_text
+
+    # completion notice
+    yield [{"role": "assistant", "content": "**Streaming for all categories is complete!**"}]
 
 
 ##############################################################################
-# Gradio UI
+# Gradio UI (Chatbot: type='messages')
 ##############################################################################
-with gr.Blocks(title="Streaming example: Gemini 2.0 Flash Thinking",
-               theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:
-
-    gr.Markdown("# 🚀 Keyword-based creative transformation ideas (Gemini 2.0 Flash Thinking, Streaming)")
-    gr.Markdown("Enter one to three keywords; 'Thinking' and 'Response' stream in real time **per category**.")
+with gr.Blocks(title="Gemini Flash Thinking (Stream)", theme=gr.themes.Soft(primary_hue="teal")) as demo:
+    gr.Markdown("# 🚀 Keyword-based creative transformation ideas (Gemini 2.0 Flash Thinking, Streaming)\n" +
+                "Enter one to three keywords; 'Thinking' and 'Response' stream in real time **per category**.")
 
-    # switched the Chatbot to type="tuples"
     chatbot = gr.Chatbot(
-        label="Per-category idea (Thinking + Response) streaming",
-        type="tuples",  # <-- corrected from "tuple" to "tuples"
+        label="Per-category streaming",
+        type="messages",  # OpenAI-style {"role": "assistant", "content": ...} format
         render_markdown=True
     )
 
@@ -402,29 +391,28 @@ with gr.Blocks(title="Streaming example: Gemini 2.0 Flash Thinking",
             text_input2 = gr.Textbox(label="Keyword 2 (optional)", placeholder="e.g., robot")
             text_input3 = gr.Textbox(label="Keyword 3 (optional)", placeholder="e.g., artificial intelligence")
             submit_button = gr.Button("Generate Ideas")
-
             clear_button = gr.Button("Clear Chat")
-
+
         with gr.Column(scale=2):
-            pass  # the chatbot is already assigned to the right-hand area
+            pass
 
     def clear_chat():
         return []
 
+    # examples
     examples = [
         ["car", "", ""],
        ["smartphone", "artificial intelligence", ""],
         ["drone", "artificial intelligence", ""],
-        ["sneakers", "wearable", "health"],
+        ["sneakers", "wearable", "health"],
     ]
     gr.Examples(examples=examples, inputs=[text_input1, text_input2, text_input3])
 
-    # streaming hookup
     submit_button.click(
         fn=process_inputs_stream,
         inputs=[text_input1, text_input2, text_input3],
         outputs=chatbot,
-        stream=True  # real-time streaming via stream=True
+        stream=True  # supported only on recent Gradio (3.27+)
     )
 
     clear_button.click(
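
For reference outside the app: the two-part chunk handling that both sides of the diff share can be exercised in isolation. A minimal sketch, assuming a valid GEMINI_API_KEY in the environment and that the experimental model still emits its reasoning as a separate leading part (the prompt string is illustrative):

import os
import google.generativeai as genai

genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")

chat = model.start_chat(history=[])
response = chat.send_message("Suggest one creative use for a drone.", stream=True)

thinking, answer = "", ""
done_thinking = False
for chunk in response:
    parts = chunk.candidates[0].content.parts
    if len(parts) == 2 and not done_thinking:
        # a chunk carrying both parts marks the thinking/response boundary
        thinking += parts[0].text
        answer += parts[1].text
        done_thinking = True
    elif done_thinking:
        answer += parts[0].text    # response phase
    else:
        thinking += parts[0].text  # still thinking

print("THINKING:\n", thinking)
print("ANSWER:\n", answer)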
 
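The hunks show only the entry and exit of choose_alternative, but the slash convention it implements is worth pinning down. A hypothetical, heavily simplified illustration of that contract (the real function carries roughly twenty more lines of selection logic not shown in the diff, and the dictionary entries here are invented):

import random

# illustrative shape of the category dictionary the diff iterates over:
# category name -> transformation phrases, with "/" separating alternatives
physical_transformation_categories = {
    "shape change": ["folds/unfolds", "expands/contracts"],
}

def choose_alternative(transformation):
    # same contract as in the diff: plain strings pass through,
    # "a/b" strings yield one randomly chosen side
    if "/" not in transformation:
        return transformation
    left, right = transformation.split("/", 1)
    return random.choice([left.strip(), right.strip()])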
 
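The main behavioural change in this commit is the Chatbot moving from tuple pairs to OpenAI-style message dicts. A minimal, self-contained sketch of that contract, assuming a Gradio release that supports type="messages" (4.x or later); in those releases a generator handler streams on its own, so the sketch passes no stream flag to click(), and the stream=True argument in the diff appears to be version-specific:

import gradio as gr

def stream_ideas(keyword):
    # with type="messages", the Chatbot takes a list of
    # {"role": ..., "content": ...} dicts; each yield repaints the chat
    text = ""
    for word in f"Streaming ideas about {keyword} ...".split():
        text += word + " "
        yield [{"role": "assistant", "content": text}]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages", label="demo stream")
    keyword = gr.Textbox(label="Keyword")
    go = gr.Button("Go")
    # a generator handler streams automatically in Gradio 4.x+
    go.click(fn=stream_ideas, inputs=keyword, outputs=chatbot)

if __name__ == "__main__":
    demo.launch()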