aiqcamp committed on
Commit
0495fd2
·
verified ·
1 Parent(s): 6f20114

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +195 -125
app.py CHANGED
@@ -1,9 +1,14 @@
 
 
1
  import os
2
  import gradio as gr
3
  import random
4
  import time
5
  import logging
 
 
6
  import google.generativeai as genai
 
7
 
8
  logging.basicConfig(
9
  level=logging.INFO,
@@ -15,10 +20,16 @@ logging.basicConfig(
15
  )
16
  logger = logging.getLogger("idea_generator")
17
 
 
18
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
19
  genai.configure(api_key=GEMINI_API_KEY)
20
 
 
 
21
 
 
 
 
22
  def choose_alternative(transformation):
23
  if "/" not in transformation:
24
  return transformation
@@ -40,6 +51,9 @@ def choose_alternative(transformation):
40
  else:
41
  return random.choice([left, right])
42
 
 
 
 
43
 
44
  physical_transformation_categories = {
45
  "๊ณต๊ฐ„ ์ด๋™": [
@@ -181,38 +195,60 @@ physical_transformation_categories = {
181
  "๋ผ์ด๋‹ค ์„ผ์„œ/๊ฐ์ง€", "ํ„ฐ์น˜ ์„ผ์„œ/๊ฐ์ง€", "์ œ์Šค์ฒ˜ ์„ผ์„œ/๊ฐ์ง€", "์‹ฌ๋ฐ• ์„ผ์„œ/๊ฐ์ง€", "ํ˜ˆ์•• ์„ผ์„œ/๊ฐ์ง€"
182
  ]
183
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
 
185
- def query_gemini_api(prompt):
186
- try:
187
- model = genai.GenerativeModel('gemini-2.0-flash-thinking-exp-01-21')
188
- response = model.generate_content(prompt)
189
-
190
- try:
191
- if hasattr(response, 'text'):
192
- return response.text
193
- if hasattr(response, 'candidates') and response.candidates:
194
- if len(response.candidates) > 0:
195
- candidate = response.candidates[0]
196
- if hasattr(candidate, 'content'):
197
- content = candidate.content
198
- if hasattr(content, 'parts') and content.parts:
199
- if len(content.parts) > 0:
200
- return content.parts[0].text
201
- if hasattr(response, 'parts') and response.parts:
202
- if len(response.parts) > 0:
203
- return response.parts[0].text
204
- return "Unable to generate a response. API response structure is different than expected."
205
- except Exception as inner_e:
206
- logger.error(f"Error processing response: {inner_e}")
207
- return f"An error occurred while processing the response: {str(inner_e)}"
208
- except Exception as e:
209
- logger.error(f"Error calling Gemini API: {e}")
210
- if "API key not valid" in str(e):
211
- return "API key is not valid. Please check your GEMINI_API_KEY environment variable."
212
- return f"An error occurred while calling the API: {str(e)}"
213
-
214
-
215
- def enhance_with_llm(base_description, obj_name, category):
 
 
 
 
 
 
216
  prompt = f"""
217
  ๋‹ค์Œ์€ '{obj_name}'์˜ '{category}' ๊ด€๋ จ ๊ฐ„๋‹จํ•œ ์„ค๋ช…์ž…๋‹ˆ๋‹ค:
218
  "{base_description}"
@@ -221,18 +257,24 @@ def enhance_with_llm(base_description, obj_name, category):
221
  2) ํ˜์‹  ํฌ์ธํŠธ์™€ ๊ธฐ๋Šฅ์„ฑ ๋“ฑ์„ ์ค‘์‹ฌ์œผ๋กœ
222
  3~4๋ฌธ์žฅ์˜ ์•„์ด๋””์–ด๋กœ ํ™•์žฅํ•ด ์ฃผ์„ธ์š”.
223
  """
224
- return query_gemini_api(prompt)
225
-
 
226
 
 
 
 
227
  def generate_single_object_transformations(obj):
228
  results = {}
229
  for category, transformations in physical_transformation_categories.items():
230
  transformation = choose_alternative(random.choice(transformations))
231
  base_description = f"{obj}์ด(๊ฐ€) {transformation} ํ˜„์ƒ์„ ๋ณด์ธ๋‹ค"
232
- results[category] = {"base": base_description, "enhanced": None}
233
  return results
234
 
235
-
 
 
236
  def generate_two_objects_interaction(obj1, obj2):
237
  results = {}
238
  for category, transformations in physical_transformation_categories.items():
@@ -242,10 +284,12 @@ def generate_two_objects_interaction(obj1, obj2):
242
  "{obj1}๊ณผ(์™€) {obj2}์ด(๊ฐ€) ์ถฉ๋Œํ•˜๋ฉด์„œ {change}๊ฐ€ ์ผ์–ด๋‚ฌ๋‹ค"
243
  ])
244
  base_description = template.format(obj1=obj1, obj2=obj2, change=transformation)
245
- results[category] = {"base": base_description, "enhanced": None}
246
  return results
247
 
248
-
 
 
249
  def generate_three_objects_interaction(obj1, obj2, obj3):
250
  results = {}
251
  for category, transformations in physical_transformation_categories.items():
@@ -255,17 +299,12 @@ def generate_three_objects_interaction(obj1, obj2, obj3):
255
  "{obj1}์ด(๊ฐ€) {obj2}์™€(๊ณผ) {obj3} ์‚ฌ์ด์—์„œ ๋งค๊ฐœ์ฒด ์—ญํ• ์„ ํ•˜๋ฉฐ {change}๋ฅผ ์ด‰์ง„ํ–ˆ๋‹ค"
256
  ])
257
  base_description = template.format(obj1=obj1, obj2=obj2, obj3=obj3, change=transformation)
258
- results[category] = {"base": base_description, "enhanced": None}
259
- return results
260
-
261
-
262
- def enhance_descriptions(results, objects):
263
- obj_name = " ๋ฐ ".join([obj for obj in objects if obj])
264
- for category, result in results.items():
265
- result["enhanced"] = enhance_with_llm(result["base"], obj_name, category)
266
  return results
267
 
268
-
 
 
269
  def generate_transformations(text1, text2=None, text3=None):
270
  if text2 and text3:
271
  results = generate_three_objects_interaction(text1, text2, text3)
@@ -276,106 +315,137 @@ def generate_transformations(text1, text2=None, text3=None):
276
  else:
277
  results = generate_single_object_transformations(text1)
278
  objects = [text1]
279
- return enhance_descriptions(results, objects)
280
-
281
-
282
- def format_results(results):
283
- formatted = ""
284
- for category, result in results.items():
285
- formatted += f"## {category}\n**๊ธฐ๋ณธ ์•„์ด๋””์–ด**: {result['base']}\n\n**ํ™•์žฅ๋œ ์•„์ด๋””์–ด**: {result['enhanced']}\n\n---\n\n"
286
- return formatted
287
-
288
 
289
  ##############################################################################
290
- # ์ŠคํŠธ๋ฆฌ๋ฐ(Streaming) ๋ฐฉ์‹์œผ๋กœ ์ถœ๋ ฅํ•˜๋Š” ํ•จ์ˆ˜: yield๋ฅผ ์‚ฌ์šฉ
291
  ##############################################################################
292
- def process_inputs_stream(text1, text2, text3):
293
- # 1) ์ฒซ ๋ฉ”์‹œ์ง€
294
- yield "์ž…๋ ฅ๊ฐ’ ํ™•์ธ ์ค‘..."
 
 
 
 
 
 
 
295
  time.sleep(0.3)
296
-
297
  text1 = text1.strip() if text1 else None
298
  text2 = text2.strip() if text2 else None
299
  text3 = text3.strip() if text3 else None
300
  if not text1:
301
- yield "์˜ค๋ฅ˜: ์ตœ์†Œ ํ•˜๋‚˜์˜ ํ‚ค์›Œ๋“œ๋ฅผ ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”."
302
- return # ์—ฌ๊ธฐ์„œ ํ•จ์ˆ˜ ์ข…๋ฃŒ
303
-
304
- # 2) ๋‹ค์Œ ๋ฉ”์‹œ์ง€
305
- yield "์ฐฝ์˜์ ์ธ ๋ชจ๋ธ/์ปจ์…‰/ํ˜•์ƒ ๋ณ€ํ™” ์•„์ด๋””์–ด ์ƒ์„ฑ ์ค‘..."
306
- time.sleep(0.3)
307
-
308
- # 3) ์‹ค์ œ ์•„์ด๋””์–ด ์ƒ์„ฑ
309
- results = generate_transformations(text1, text2, text3)
310
-
311
- # 4) ์ค‘๊ฐ„ ๋‹จ๊ณ„ ์ถœ๋ ฅ
312
- yield "๊ฒฐ๊ณผ ํฌ๋งทํŒ… ์ค‘..."
313
  time.sleep(0.3)
314
-
315
- # 5) ์ตœ์ข… ๊ฒฐ๊ณผ ์ •๋ฆฌ
316
- formatted = format_results(results)
317
-
318
- # 6) ๊ฒฐ๊ณผ ์ถœ๋ ฅ
319
- yield formatted
320
-
321
- # 7) ์™„๋ฃŒ
322
- yield "์™„๋ฃŒ!"
323
 
324
- def get_warning_message():
325
- if not GEMINI_API_KEY:
326
- return "โš ๏ธ ํ™˜๊ฒฝ ๋ณ€์ˆ˜ GEMINI_API_KEY๊ฐ€ ์„ค์ •๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค. Gemini API ํ‚ค๋ฅผ ์„ค์ •ํ•˜์„ธ์š”."
327
- return ""
328
 
 
 
329
 
330
- with gr.Blocks(title="ํ‚ค์›Œ๋“œ ๊ธฐ๋ฐ˜ ์ฐฝ์˜์  ๋ณ€ํ™” ์•„์ด๋””์–ด ์ƒ์„ฑ๊ธฐ",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
331
  theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:
332
-
333
- gr.HTML("""
334
- <style>
335
- body { background: linear-gradient(135deg, #e0eafc, #cfdef3); font-family: 'Arial', sans-serif; }
336
- .gradio-container { padding: 20px; }
337
- h1, h2 { text-align: center; }
338
- h1 { color: #333; }
339
- h2 { color: #555; }
340
- .output { background-color: #ffffff; padding: 15px; border-radius: 8px; }
341
- .gr-button { background-color: #4CAF50; color: white; border: none; border-radius: 4px; padding: 8px 16px; }
342
- </style>
343
- """)
344
-
345
- gr.Markdown("# ๐Ÿš€ ํ‚ค์›Œ๋“œ ๊ธฐ๋ฐ˜ ์ฐฝ์˜์  ๋ณ€ํ™” ์•„์ด๋””์–ด ์ƒ์„ฑ๊ธฐ")
346
- gr.Markdown("์ž…๋ ฅํ•œ **ํ‚ค์›Œ๋“œ**(์ตœ๋Œ€ 3๊ฐœ)๋ฅผ ๋ฐ”ํƒ•์œผ๋กœ, **์ฐฝ์˜์ ์ธ ๋ชจ๋ธ/์ปจ์…‰/ํ˜•์ƒ ๋ณ€ํ™”**์— ๋Œ€ํ•œ ์ดํ•ด์™€ **ํ˜์‹  ํฌ์ธํŠธ**, **๊ธฐ๋Šฅ์„ฑ** ๋“ฑ์„ ์ค‘์‹ฌ์œผ๋กœ ํ™•์žฅ๋œ ์•„์ด๋””์–ด๋ฅผ ์ œ์‹œํ•ฉ๋‹ˆ๋‹ค.")
347
-
348
- warning = gr.Markdown(get_warning_message())
349
-
350
  with gr.Row():
351
  with gr.Column(scale=1):
352
- text_input1 = gr.Textbox(label="ํ‚ค์›Œ๋“œ 1 (ํ•„์ˆ˜)", placeholder="์˜ˆ: ์Šค๋งˆํŠธํฐ")
353
- text_input2 = gr.Textbox(label="ํ‚ค์›Œ๋“œ 2 (์„ ํƒ)", placeholder="์˜ˆ: ์ธ๊ณต์ง€๋Šฅ")
354
- text_input3 = gr.Textbox(label="ํ‚ค์›Œ๋“œ 3 (์„ ํƒ)", placeholder="์˜ˆ: ํ—ฌ์Šค์ผ€์–ด")
355
  submit_button = gr.Button("์•„์ด๋””์–ด ์ƒ์„ฑํ•˜๊ธฐ")
 
 
356
 
357
  with gr.Column(scale=2):
358
- with gr.TabItem("์ฐฝ์˜์ ์ธ ๋ชจ๋ธ/์ปจ์…‰/ํ˜•์ƒ ๋ณ€ํ™” ์•„์ด๋””์–ด", id="creative_tab"):
359
- # Markdown ์ถœ๋ ฅ
360
- idea_output = gr.Markdown(label="์•„์ด๋””์–ด ๊ฒฐ๊ณผ")
361
-
362
- gr.Examples(
363
- examples=[
364
- ["์ž๋™์ฐจ", "", ""],
365
- ["์Šค๋งˆํŠธํฐ", "์ธ๊ณต์ง€๋Šฅ", ""],
366
- ["๋“œ๋ก ", "์ธ๊ณต์ง€๋Šฅ", ""],
367
- ["์šด๋™ํ™”", "์›จ์–ด๋Ÿฌ๋ธ”", "๊ฑด๊ฐ•"],
368
- ],
369
- inputs=[text_input1, text_input2, text_input3],
370
- )
371
-
372
- # stream=True ์˜ต์…˜์„ ํ†ตํ•ด ํ•จ์ˆ˜๊ฐ€ yieldํ•˜๋Š” ๋ฌธ์ž์—ด์„ ์‹ค์‹œ๊ฐ„ ์ถœ๋ ฅ
373
  submit_button.click(
374
- fn=process_inputs_stream,
375
  inputs=[text_input1, text_input2, text_input3],
376
- outputs=idea_output,
377
- stream=True
 
 
 
 
 
378
  )
379
 
380
  if __name__ == "__main__":
381
  demo.launch(debug=True)
 
 
1
+
2
+
3
  import os
4
  import gradio as gr
5
  import random
6
  import time
7
  import logging
8
+ from typing import Iterator
9
+
10
  import google.generativeai as genai
11
+ from gradio import ChatMessage # ChatMessage ๊ตฌ์กฐ ์‚ฌ์šฉ (Thinking/Response ๊ตฌ๋ถ„ ๊ฐ€๋Šฅ)
12
 
13
  logging.basicConfig(
14
  level=logging.INFO,
 
20
  )
21
  logger = logging.getLogger("idea_generator")
22
 
23
+ # Gemini API ํ‚ค ์„ค์ •
24
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
25
  genai.configure(api_key=GEMINI_API_KEY)
26
 
27
+ # ์‚ฌ์šฉํ•  Gemini 2.0 Flash ๋ชจ๋ธ (Thinking ๊ธฐ๋Šฅ ํฌํ•จ)
28
+ model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
29
 
30
+ ##############################################################################
31
+ # ๋ณ€ํ™˜ ๋ฌธ์ž์—ด์—์„œ ์Šฌ๋ž˜์‹œ("/")๋กœ ๊ตฌ๋ถ„๋œ ๋‘ ์˜ต์…˜ ์ค‘ ํ•˜๋‚˜ ์„ ํƒ
32
+ ##############################################################################
33
  def choose_alternative(transformation):
34
  if "/" not in transformation:
35
  return transformation
 
51
  else:
52
  return random.choice([left, right])
53
 
54
+ ##############################################################################
55
+ # ์นดํ…Œ๊ณ ๋ฆฌ ์‚ฌ์ „ (์ผ๋ถ€๋งŒ ๋ฐœ์ทŒ ๊ฐ€๋Šฅ. ์—ฌ๊ธฐ์„œ๋Š” ์˜ˆ์‹œ๋กœ 3๊ฐœ๋งŒ ์œ ์ง€)
56
+ ##############################################################################
57
 
58
  physical_transformation_categories = {
59
  "๊ณต๊ฐ„ ์ด๋™": [
 
195
  "๋ผ์ด๋‹ค ์„ผ์„œ/๊ฐ์ง€", "ํ„ฐ์น˜ ์„ผ์„œ/๊ฐ์ง€", "์ œ์Šค์ฒ˜ ์„ผ์„œ/๊ฐ์ง€", "์‹ฌ๋ฐ• ์„ผ์„œ/๊ฐ์ง€", "ํ˜ˆ์•• ์„ผ์„œ/๊ฐ์ง€"
196
  ]
197
  }
198
+ ##############################################################################
199
+ # ์ŠคํŠธ๋ฆฌ๋ฐ์šฉ Gemini API ํ•จ์ˆ˜:
200
+ # - 'Thinking' ๋‹จ๊ณ„(์•„์ด๋””์–ด ๋‚ด๋ถ€ ์ถ”๋ก )์™€ ์ตœ์ข… 'Response' ๋‹จ๊ณ„๋กœ ๊ตฌ์„ฑ
201
+ ##############################################################################
202
def query_gemini_api_stream(prompt: str) -> Iterator[str]:
    """
    Stream a Gemini 2.0 Flash ("thinking" variant) reply for *prompt*.

    Yields string chunks using a simple tagging protocol consumed downstream:
      - "[Thinking Chunk] ..."    : internal-reasoning text
      - "[Response Start] ..."    : first chunk of the final answer
      - untagged chunks           : continuation of the final answer
      - "\n[Final Response]\n..." : the complete assembled answer, emitted last
    """
    # One-shot call: start a fresh chat with no prior history.
    chat = model.start_chat(history=[])
    response = chat.send_message(prompt, stream=True)

    thought_buffer = ""
    response_buffer = ""
    thinking_complete = False

    for chunk in response:
        # Each streamed chunk exposes candidates[0].content.parts.
        # ROBUSTNESS FIX: skip chunks that carry no candidates or no parts
        # instead of raising IndexError mid-stream.
        if not chunk.candidates:
            continue
        parts = chunk.candidates[0].content.parts
        if not parts:
            continue

        # Observed pattern: when parts has 2 entries, the model delivered the
        # tail of Thinking (parts[0]) and the start of the Response (parts[1])
        # in one chunk; otherwise parts arrive one at a time.
        if len(parts) == 2 and not thinking_complete:
            thought_buffer += parts[0].text
            yield f"[Thinking Chunk] {parts[0].text}"

            response_buffer = parts[1].text
            yield f"[Response Start] {parts[1].text}"

            thinking_complete = True
        elif thinking_complete:
            # Thinking already finished — keep streaming the Response.
            current_chunk = parts[0].text
            response_buffer += current_chunk
            yield current_chunk
        else:
            # Still in the Thinking phase.
            current_chunk = parts[0].text
            thought_buffer += current_chunk
            yield f"[Thinking Chunk] {current_chunk}"

    # After streaming completes, emit the fully assembled response once more.
    yield f"\n[Final Response]\n{response_buffer}"
244
+ ##############################################################################
245
+ # ์นดํ…Œ๊ณ ๋ฆฌ๋ณ„ ๊ฐ„๋‹จ ์„ค๋ช…์„ 'Thinking' + 'Response'๋กœ ํ™•์žฅ (์ŠคํŠธ๋ฆฌ๋ฐ)
246
+ ##############################################################################
247
+ def enhance_with_llm_stream(base_description, obj_name, category) -> Iterator[str]:
248
+ """
249
+ ๊ธฐ์กด enhance_with_llm๋ฅผ ์ŠคํŠธ๋ฆฌ๋ฐ ํ˜•ํƒœ๋กœ ๋ฐ”๊พผ ํ•จ์ˆ˜:
250
+ 'Thinking' + 'Response' ๋‹จ๊ณ„๋ฅผ chunk๋กœ ์ˆœ์ฐจ ์ „๋‹ฌ
251
+ """
252
  prompt = f"""
253
  ๋‹ค์Œ์€ '{obj_name}'์˜ '{category}' ๊ด€๋ จ ๊ฐ„๋‹จํ•œ ์„ค๋ช…์ž…๋‹ˆ๋‹ค:
254
  "{base_description}"
 
257
  2) ํ˜์‹  ํฌ์ธํŠธ์™€ ๊ธฐ๋Šฅ์„ฑ ๋“ฑ์„ ์ค‘์‹ฌ์œผ๋กœ
258
  3~4๋ฌธ์žฅ์˜ ์•„์ด๋””์–ด๋กœ ํ™•์žฅํ•ด ์ฃผ์„ธ์š”.
259
  """
260
+ # query_gemini_api_stream()๋กœ๋ถ€ํ„ฐ chunk๋ฅผ ๋ฐ›์•„ ๊ทธ๋Œ€๋กœ yield
261
+ for chunk in query_gemini_api_stream(prompt):
262
+ yield chunk
263
 
264
+ ##############################################################################
265
+ # ํ•œ ํ‚ค์›Œ๋“œ(์˜ค๋ธŒ์ ํŠธ)์— ๋Œ€ํ•œ ๊ธฐ๋ณธ ์•„์ด๋””์–ด(์นดํ…Œ๊ณ ๋ฆฌ๋ณ„) ์ƒ์„ฑ
266
+ ##############################################################################
267
def generate_single_object_transformations(obj):
    """
    Build the per-category base ideas for a single keyword *obj*.

    Returns a dict mapping each category name to
    {"base": <generated sentence>, "enhanced": ""}; "enhanced" is filled in
    later by the streaming LLM step.
    """
    def _describe(options):
        # Pick one candidate change, resolving "a/b" alternatives.
        transformation = choose_alternative(random.choice(options))
        return f"{obj}์ด(๊ฐ€) {transformation} ํ˜„์ƒ์„ ๋ณด์ธ๋‹ค"

    return {
        category: {"base": _describe(options), "enhanced": ""}
        for category, options in physical_transformation_categories.items()
    }
274
 
275
+ ##############################################################################
276
+ # 2๊ฐœ ํ‚ค์›Œ๋“œ ์ƒํ˜ธ์ž‘์šฉ
277
+ ##############################################################################
278
  def generate_two_objects_interaction(obj1, obj2):
279
  results = {}
280
  for category, transformations in physical_transformation_categories.items():
 
284
  "{obj1}๊ณผ(์™€) {obj2}์ด(๊ฐ€) ์ถฉ๋Œํ•˜๋ฉด์„œ {change}๊ฐ€ ์ผ์–ด๋‚ฌ๋‹ค"
285
  ])
286
  base_description = template.format(obj1=obj1, obj2=obj2, change=transformation)
287
+ results[category] = {"base": base_description, "enhanced": ""}
288
  return results
289
 
290
+ ##############################################################################
291
+ # 3๊ฐœ ํ‚ค์›Œ๋“œ ์ƒํ˜ธ์ž‘์šฉ
292
+ ##############################################################################
293
  def generate_three_objects_interaction(obj1, obj2, obj3):
294
  results = {}
295
  for category, transformations in physical_transformation_categories.items():
 
299
  "{obj1}์ด(๊ฐ€) {obj2}์™€(๊ณผ) {obj3} ์‚ฌ์ด์—์„œ ๋งค๊ฐœ์ฒด ์—ญํ• ์„ ํ•˜๋ฉฐ {change}๋ฅผ ์ด‰์ง„ํ–ˆ๋‹ค"
300
  ])
301
  base_description = template.format(obj1=obj1, obj2=obj2, obj3=obj3, change=transformation)
302
+ results[category] = {"base": base_description, "enhanced": ""}
 
 
 
 
 
 
 
303
  return results
304
 
305
+ ##############################################################################
306
+ # ์‹ค์ œ ๋ณ€ํ™˜ ์ƒ์„ฑ ๋กœ์ง
307
+ ##############################################################################
308
  def generate_transformations(text1, text2=None, text3=None):
309
  if text2 and text3:
310
  results = generate_three_objects_interaction(text1, text2, text3)
 
315
  else:
316
  results = generate_single_object_transformations(text1)
317
  objects = [text1]
318
+ return results, objects
 
 
 
 
 
 
 
 
319
 
320
  ##############################################################################
321
+ # ์ŠคํŠธ๋ฆฌ๋ฐ: ๊ฐ ์นดํ…Œ๊ณ ๋ฆฌ๋ณ„๋กœ 'Thinking' + 'Response' ๋ถ€๋ถ„์„ ์‹ค์‹œ๊ฐ„ ์ „๋‹ฌ
322
  ##############################################################################
323
+ def process_inputs_stream(text1, text2, text3) -> Iterator[list]:
324
+ """
325
+ Gradio์˜ Chatbot ํ˜•์‹์— ๋งž์ถฐ์„œ,
326
+ [(role='assistant'|'user', content=...), ...] ํ˜•ํƒœ๋กœ yieldํ•œ๋‹ค.
327
+ ์ƒ๊ฐ(Thinking) ๋‹จ๊ณ„์™€ ์ตœ์ข… ์‘๋‹ต์„ ๋ถ„๋ฆฌํ•ด์„œ ์‹ค์‹œ๊ฐ„ ์ „์†ก.
328
+ """
329
+ messages = []
330
+
331
+ # 1) ์ž…๋ ฅ๊ฐ’ ํ™•์ธ
332
+ yield [("assistant", "์ž…๋ ฅ๊ฐ’ ํ™•์ธ ์ค‘...")]
333
  time.sleep(0.3)
334
+
335
  text1 = text1.strip() if text1 else None
336
  text2 = text2.strip() if text2 else None
337
  text3 = text3.strip() if text3 else None
338
  if not text1:
339
+ yield [("assistant", "์˜ค๋ฅ˜: ์ตœ์†Œ ํ•˜๋‚˜์˜ ํ‚ค์›Œ๋“œ๋ฅผ ์ž…๋ ฅํ•ด์ฃผ์„ธ์š”.")]
340
+ return
341
+
342
+ # 2) ์•„์ด๋””์–ด ์ƒ์„ฑ
343
+ yield [("assistant", "์ฐฝ์˜์ ์ธ ๋ชจ๋ธ/์ปจ์…‰/ํ˜•์ƒ ๋ณ€ํ™” ์•„์ด๋””์–ด ์ƒ์„ฑ ์ค‘... (์นดํ…Œ๊ณ ๋ฆฌ๋ณ„ ๋ถ„์„)")]
 
 
 
 
 
 
 
344
  time.sleep(0.3)
345
+ results, objects = generate_transformations(text1, text2, text3)
 
 
 
 
 
 
 
 
346
 
347
+ # ์นดํ…Œ๊ณ ๋ฆฌ๋ณ„ ์ŠคํŠธ๋ฆฌ๋ฐ ์ฒ˜๋ฆฌ
348
+ obj_name = " ๋ฐ ".join([obj for obj in objects if obj])
 
 
349
 
350
+ for i, (category, result_dict) in enumerate(results.items(), start=1):
351
+ base_desc = result_dict["base"]
352
 
353
+ # ์นดํ…Œ๊ณ ๋ฆฌ ์•ˆ๋‚ด ์ถœ๋ ฅ
354
+ yield [("assistant", f"**[{i}/{len(results)}] ์นดํ…Œ๊ณ ๋ฆฌ:** {category}\n\n๊ธฐ๋ณธ ์•„์ด๋””์–ด: {base_desc}\n\n์ง€๊ธˆ๋ถ€ํ„ฐ Thinking + Response๋ฅผ ๋‹จ๊ณ„์ ์œผ๋กœ ์ŠคํŠธ๋ฆฌ๋ฐํ•ฉ๋‹ˆ๋‹ค...")]
355
+ time.sleep(0.5)
356
+
357
+ # ์ŠคํŠธ๋ฆฌ๋ฐ LLM ํ˜ธ์ถœ
358
+ thinking_text = ""
359
+ response_text = ""
360
+ is_thinking_done = False
361
+
362
+ # enhance_with_llm_stream ํ˜ธ์ถœ
363
+ for chunk in enhance_with_llm_stream(base_desc, obj_name, category):
364
+ if chunk.startswith("[Thinking Chunk]"):
365
+ # ์ƒ๊ฐ ํŒŒํŠธ
366
+ thinking_text += chunk.replace("[Thinking Chunk]", "")
367
+ messages_to_user = f"**[Thinking]**\n{thinking_text}"
368
+ yield [("assistant", messages_to_user)]
369
+ elif chunk.startswith("[Response Start]"):
370
+ # ์‘๋‹ต ์‹œ์ž‘ ์‹œ์ 
371
+ is_thinking_done = True
372
+ # ๋‚จ์•„์žˆ๋Š” ๋ถ€๋ถ„์€ response_text๋กœ
373
+ partial = chunk.replace("[Response Start]", "")
374
+ response_text += partial
375
+ messages_to_user = f"**[Final Response ์‹œ์ž‘]**\n{partial}"
376
+ yield [("assistant", messages_to_user)]
377
+ elif chunk.startswith("[Final Response]"):
378
+ # ์ตœ์ข… ์ข…๋ฃŒ
379
+ final = chunk.replace("[Final Response]", "")
380
+ response_text += f"\n{final}"
381
+ yield [("assistant", f"**[์ตœ์ข… Response]**\n{response_text.strip()}")]
382
+ else:
383
+ # ์ผ๋ฐ˜ ์‘๋‹ต ์ŠคํŠธ๋ฆฌ๋ฐ
384
+ if is_thinking_done:
385
+ response_text += chunk
386
+ yield [("assistant", f"**[์‘๋‹ต ์ง„ํ–‰]**\n{response_text}") ]
387
+ else:
388
+ thinking_text += chunk
389
+ yield [("assistant", f"**[Thinking]**\n{thinking_text}")]
390
+
391
+ # ํ•œ ์นดํ…Œ๊ณ ๋ฆฌ ์‘๋‹ต ์™„๋ฃŒ
392
+ result_dict["enhanced"] = response_text
393
+
394
+ # 3) ์ „์ฒด ์นดํ…Œ๊ณ ๋ฆฌ ์™„๋ฃŒ
395
+ yield [("assistant", "**๋ชจ๋“  ์นดํ…Œ๊ณ ๋ฆฌ์— ๋Œ€ํ•œ ์ŠคํŠธ๋ฆฌ๋ฐ์ด ์™„๋ฃŒ๋˜์—ˆ์Šต๋‹ˆ๋‹ค!**")]
396
+
397
+
398
+ ##############################################################################
399
+ # Gradio UI
400
+ ##############################################################################
401
+ with gr.Blocks(title="์ŠคํŠธ๋ฆฌ๋ฐ ์˜ˆ์ œ: Gemini 2.0 Flash Thinking",
402
  theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:
403
+
404
+ gr.Markdown("# ๐Ÿš€ ํ‚ค์›Œ๋“œ ๊ธฐ๋ฐ˜ ์ฐฝ์˜์  ๋ณ€ํ™” ์•„์ด๋””์–ด (Gemini 2.0 Flash Thinking, Streaming)")
405
+ gr.Markdown("ํ‚ค์›Œ๋“œ 1~3๊ฐœ๋ฅผ ์ž…๋ ฅํ•˜๋ฉด, **์นดํ…Œ๊ณ ๋ฆฌ๋ณ„๋กœ** 'Thinking'๊ณผ 'Response'๊ฐ€ ์‹ค์‹œ๊ฐ„ ์ŠคํŠธ๋ฆฌ๋ฐ๋ฉ๋‹ˆ๋‹ค.")
406
+
407
+ chatbot = gr.Chatbot(
408
+ label="์นดํ…Œ๊ณ ๋ฆฌ๋ณ„ ์•„์ด๋””์–ด(Thinking + Response) ์ŠคํŠธ๋ฆฌ๋ฐ",
409
+ type="tuple", # (role, content) ์Œ์˜ ๋ฆฌ์ŠคํŠธ๋กœ ์ „๋‹ฌ
410
+ render_markdown=True
411
+ )
412
+
 
 
 
 
 
 
 
 
413
  with gr.Row():
414
  with gr.Column(scale=1):
415
+ text_input1 = gr.Textbox(label="ํ‚ค์›Œ๋“œ 1 (ํ•„์ˆ˜)", placeholder="์˜ˆ: ์ž๋™์ฐจ")
416
+ text_input2 = gr.Textbox(label="ํ‚ค์›Œ๋“œ 2 (์„ ํƒ)", placeholder="์˜ˆ: ๋กœ๋ด‡")
417
+ text_input3 = gr.Textbox(label="ํ‚ค์›Œ๋“œ 3 (์„ ํƒ)", placeholder="์˜ˆ: ์ธ๊ณต์ง€๋Šฅ")
418
  submit_button = gr.Button("์•„์ด๋””์–ด ์ƒ์„ฑํ•˜๊ธฐ")
419
+
420
+ clear_button = gr.Button("๋Œ€ํ™” ์ง€์šฐ๊ธฐ")
421
 
422
  with gr.Column(scale=2):
423
+ # ์ด๋ฏธ chatbot์ด ์ž๋ฆฌ๋ฅผ ์ฐจ์ง€ํ•˜๋ฏ€๋กœ ํŒจ์Šค
424
+ pass
425
+
426
+ def clear_chat():
427
+ return []
428
+
429
+ examples = [
430
+ ["์ž๋™์ฐจ", "", ""],
431
+ ["์Šค๋งˆํŠธํฐ", "์ธ๊ณต์ง€๋Šฅ", ""],
432
+ ["๋“œ๋ก ", "์ธ๊ณต์ง€๋Šฅ", ""],
433
+ ["์šด๋™ํ™”", "์›จ์–ด๋Ÿฌ๋ธ”", "๊ฑด๊ฐ•"],
434
+ ]
435
+ gr.Examples(examples=examples, inputs=[text_input1, text_input2, text_input3])
436
+
 
437
  submit_button.click(
438
+ fn=process_inputs_stream,
439
  inputs=[text_input1, text_input2, text_input3],
440
+ outputs=chatbot,
441
+ stream=True # ์ŠคํŠธ๋ฆฌ๋ฐ ์ถœ๋ ฅ
442
+ )
443
+
444
+ clear_button.click(
445
+ fn=clear_chat,
446
+ outputs=chatbot
447
  )
448
 
449
  if __name__ == "__main__":
450
  demo.launch(debug=True)
451
+