|
import os |
|
import tempfile |
|
from PIL import Image |
|
import gradio as gr |
|
import logging |
|
import re |
|
import io |
|
from io import BytesIO |
|
|
|
from google import genai |
|
from google.genai import types |
|
|
|
|
|
from dotenv import load_dotenv

# Pull environment variables (notably GEMINI_API_KEY) from a local .env file.
load_dotenv()

# Timestamped, INFO-level logging for the whole app.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

logger = logging.getLogger(__name__)  # module-level logger shared by all functions
|
|
|
|
|
def get_translation(korean_text):
    """Translate a Korean prompt to English with the Gemini API.

    Designed to be safe to call unconditionally: on any failure (missing
    API key, empty response, network/SDK error) the original Korean text
    is returned unchanged so the caller can still use it as a prompt.

    Args:
        korean_text: Prompt text written in Korean.

    Returns:
        The English translation, or ``korean_text`` on any failure.
    """
    try:
        api_key = os.environ.get("GEMINI_API_KEY")
        if not api_key:
            logger.error("GEMINI_API_KEY๊ฐ ์ค์ ๋์ง ์์์ต๋๋ค.")
            return korean_text

        # BUGFIX: the google-genai SDK has no genai.GenerativeModel (and no
        # api_key kwarg on a model object); use genai.Client +
        # client.models.generate_content, the same style as
        # generate_with_images() below.
        client = genai.Client(api_key=api_key)

        translation_prompt = f"""
        Translate the following Korean text to English accurately:

        {korean_text}

        Provide only the translation, no explanations.
        """

        response = client.models.generate_content(
            model="gemini-2.0-flash",
            contents=translation_prompt,
            config=types.GenerateContentConfig(
                system_instruction=(
                    "You are a professional translator who translates "
                    "Korean to English accurately."
                ),
                temperature=0.2,
                top_p=0.9,
                max_output_tokens=1024,
            ),
        )

        english_text = getattr(response, 'text', None)
        if english_text:
            english_text = english_text.strip()
            logger.info(f"๋ฒ์ญ ๊ฒฐ๊ณผ: {english_text}")
            return english_text

        logger.warning("๋ฒ์ญ ์๋ต์ text ์์ฑ์ด ์์ต๋๋ค.")
        return korean_text
    except Exception as e:
        logger.exception(f"๋ฒ์ญ ์ค ์ค๋ฅ ๋ฐ์: {str(e)}")
        return korean_text
|
|
|
def save_binary_file(file_name, data):
    """Persist raw bytes to *file_name*, replacing any existing content."""
    with open(file_name, "wb") as handle:
        handle.write(data)
|
|
|
def preprocess_prompt(prompt, image1=None, image2=None, image3=None):
    """Expand "#N" image references and feature shortcuts in a user prompt.

    "#1"/"#2"/"#3" markers are rewritten to readable descriptions (tagged
    as missing when the matching image slot is empty), known feature
    phrases (e.g. "4. ์ท๋ฐ๊พธ๊ธฐ") replace the whole prompt with a canned
    instruction, and a final "generate an image" sentence is appended.

    Args:
        prompt: Raw user prompt (Korean).
        image1: Optional first image; only presence is checked.
        image2: Optional second image; only presence is checked.
        image3: Optional third image; only presence is checked.

    Returns:
        The rewritten prompt string.
    """
    has_img1 = image1 is not None
    has_img2 = image2 is not None
    has_img3 = image3 is not None

    # BUGFIX: remember whether the raw prompt referenced image #3 BEFORE the
    # marker substitutions below destroy the "#3" token. The feature
    # branches further down previously tested the already-rewritten prompt,
    # so their three-image variants were unreachable.
    refers_to_img3 = "#3" in prompt

    if "#1" in prompt and not has_img1:
        prompt = prompt.replace("#1", "์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง(์์)")
    else:
        prompt = prompt.replace("#1", "์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง")

    if "#2" in prompt and not has_img2:
        prompt = prompt.replace("#2", "๋ ๋ฒ์งธ ์ด๋ฏธ์ง(์์)")
    else:
        prompt = prompt.replace("#2", "๋ ๋ฒ์งธ ์ด๋ฏธ์ง")

    if "#3" in prompt and not has_img3:
        prompt = prompt.replace("#3", "์ธ ๋ฒ์งธ ์ด๋ฏธ์ง(์์)")
    else:
        prompt = prompt.replace("#3", "์ธ ๋ฒ์งธ ์ด๋ฏธ์ง")

    # Feature shortcuts: a recognized phrase replaces the entire prompt.
    if "1. ์ด๋ฏธ์ง ๋ณ๊ฒฝ" in prompt:
        prompt = "์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง๋ฅผ ์ฐฝ์์ ์ผ๋ก ๋ณํํด์ฃผ์ธ์. ๋ ์์ํ๊ณ ์์ ์ ์ธ ๋ฒ์ ์ผ๋ก ๋ง๋ค์ด์ฃผ์ธ์."

    elif "2. ๊ธ์์ง์ฐ๊ธฐ" in prompt:
        prompt = "์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง์์ ๋ชจ๋ ํ์คํธ๋ฅผ ์ฐพ์ ์์ฐ์ค๋ฝ๊ฒ ์ ๊ฑฐํด์ฃผ์ธ์. ๊น๋ํ ์ด๋ฏธ์ง๋ก ๋ง๋ค์ด์ฃผ์ธ์."

    elif "3. ์ผ๊ตด๋ฐ๊พธ๊ธฐ" in prompt:
        prompt = "์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง์ ์ธ๋ฌผ ์ผ๊ตด์ ๋ ๋ฒ์งธ ์ด๋ฏธ์ง์ ์ผ๊ตด๋ก ์์ฐ์ค๋ฝ๊ฒ ๊ต์ฒดํด์ฃผ์ธ์. ์ผ๊ตด์ ํ์ ๊ณผ ํน์ง์ ๋ ๋ฒ์งธ ์ด๋ฏธ์ง๋ฅผ ๋ฐ๋ฅด๋, ๋๋จธ์ง ๋ถ๋ถ์ ์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง๋ฅผ ์ ์งํด์ฃผ์ธ์."

    elif "4. ์ท๋ฐ๊พธ๊ธฐ" in prompt:
        # Use the pre-substitution marker flag, not the rewritten prompt.
        if refers_to_img3:
            prompt = "์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง์ ์ธ๋ฌผ ์์์ ๋ ๋ฒ์งธ ๋๋ ์ธ ๋ฒ์งธ ์ด๋ฏธ์ง์ ์์์ผ๋ก ์์ฐ์ค๋ฝ๊ฒ ๊ต์ฒดํด์ฃผ์ธ์. ์์์ ์คํ์ผ๊ณผ ์์์ ์ฐธ์กฐ ์ด๋ฏธ์ง๋ฅผ ๋ฐ๋ฅด๋, ์ ์ฒด ๋น์จ๊ณผ ํฌ์ฆ๋ ์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง๋ฅผ ์ ์งํด์ฃผ์ธ์."
        else:
            prompt = "์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง์ ์ธ๋ฌผ ์์์ ๋ ๋ฒ์งธ ์ด๋ฏธ์ง์ ์์์ผ๋ก ์์ฐ์ค๋ฝ๊ฒ ๊ต์ฒดํด์ฃผ์ธ์. ์์์ ์คํ์ผ๊ณผ ์์์ ๋ ๋ฒ์งธ ์ด๋ฏธ์ง๋ฅผ ๋ฐ๋ฅด๋, ์ ์ฒด ๋น์จ๊ณผ ํฌ์ฆ๋ ์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง๋ฅผ ์ ์งํด์ฃผ์ธ์."

    elif "5. ๋ฐฐ๊ฒฝ๋ฐ๊พธ๊ธฐ" in prompt:
        prompt = "์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง์ ๋ฐฐ๊ฒฝ์ ๋ ๋ฒ์งธ ์ด๋ฏธ์ง์ ๋ฐฐ๊ฒฝ์ผ๋ก ์์ฐ์ค๋ฝ๊ฒ ๊ต์ฒดํด์ฃผ์ธ์. ์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง์ ์ฃผ์ ํผ์ฌ์ฒด๋ ์ ์งํ๊ณ , ๋ ๋ฒ์งธ ์ด๋ฏธ์ง์ ๋ฐฐ๊ฒฝ๊ณผ ์กฐํ๋กญ๊ฒ ํฉ์ฑํด์ฃผ์ธ์."

    elif "6. ์ด๋ฏธ์ง ํฉ์ฑ(์ํํฌํจ)" in prompt:
        # Same pre-substitution check as the clothing branch above.
        if refers_to_img3:
            prompt = "์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง์ ๋ ๋ฒ์งธ, ์ธ ๋ฒ์งธ ์ด๋ฏธ์ง๋ฅผ ์์ฐ์ค๋ฝ๊ฒ ํฉ์ฑํด์ฃผ์ธ์. ๋ชจ๋ ์ด๋ฏธ์ง์ ์ฃผ์ ์์๋ฅผ ํฌํจํ๊ณ , ํนํ ์ํ์ด ์ ๋ณด์ด๋๋ก ์กฐํ๋กญ๊ฒ ํตํฉํด์ฃผ์ธ์."
        else:
            prompt = "์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง์ ๋ ๋ฒ์งธ ์ด๋ฏธ์ง๋ฅผ ์์ฐ์ค๋ฝ๊ฒ ํฉ์ฑํด์ฃผ์ธ์. ๋ ์ด๋ฏธ์ง์ ์ฃผ์ ์์๋ฅผ ํฌํจํ๊ณ , ํนํ ์ํ์ด ์ ๋ณด์ด๋๋ก ์กฐํ๋กญ๊ฒ ํตํฉํด์ฃผ์ธ์."

    elif "7. ์ด๋ฏธ์ง ํฉ์ฑ(์คํ์ผ์ ์ฉ)" in prompt:
        prompt = "์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง์ ๋ด์ฉ์ ๋ ๋ฒ์งธ ์ด๋ฏธ์ง์ ์คํ์ผ๋ก ๋ณํํด์ฃผ์ธ์. ์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง์ ์ฃผ์ ํผ์ฌ์ฒด์ ๊ตฌ๋๋ ์ ์งํ๋, ๋ ๋ฒ์งธ ์ด๋ฏธ์ง์ ์์ ์ ์คํ์ผ, ์์, ์ง๊ฐ์ ์ ์ฉํด์ฃผ์ธ์."

    elif "์ ๋ถ์์์ผ๋ก ๋ฐ๊ฟ๋ผ" in prompt or "๋ฅผ ๋ถ์์์ผ๋ก ๋ฐ๊ฟ๋ผ" in prompt:
        prompt = "์ฒซ ๋ฒ์งธ ์ด๋ฏธ์ง๋ฅผ ๋ถ์์ ํค์ผ๋ก ๋ณ๊ฒฝํด์ฃผ์ธ์. ์ ์ฒด์ ์ธ ์์์ ๋ถ์ ๊ณ์ด๋ก ์กฐ์ ํ๊ณ ์์ฐ์ค๋ฌ์ด ๋๋์ ์ ์งํด์ฃผ์ธ์."

    # Always close with an explicit generation instruction.
    prompt += " ์ด๋ฏธ์ง๋ฅผ ์์ฑํด์ฃผ์ธ์."

    return prompt
|
|
|
def generate_with_images(prompt, images): |
|
""" |
|
๊ณต์ ๋ฌธ์์ ๊ธฐ๋ฐํ ์ฌ๋ฐ๋ฅธ API ํธ์ถ ๋ฐฉ์ ๊ตฌํ |
|
์ฌ์๋ ๋ก์ง ์ถ๊ฐ |
|
""" |
|
max_retries = 2 |
|
retries = 0 |
|
|
|
while retries <= max_retries: |
|
try: |
|
|
|
api_key = os.environ.get("GEMINI_API_KEY") |
|
if not api_key: |
|
return None, "API ํค๊ฐ ์ค์ ๋์ง ์์์ต๋๋ค. ํ๊ฒฝ๋ณ์๋ฅผ ํ์ธํด์ฃผ์ธ์." |
|
|
|
|
|
client = genai.Client(api_key=api_key) |
|
|
|
|
|
english_prompt = get_translation(prompt) |
|
logger.info(f"์๋ณธ ํ๋กฌํํธ: {prompt}") |
|
logger.info(f"๋ฒ์ญ๋ ํ๋กฌํํธ: {english_prompt}") |
|
|
|
|
|
contents = [] |
|
|
|
|
|
contents.append(english_prompt) |
|
|
|
|
|
for idx, img in enumerate(images, 1): |
|
if img is not None: |
|
contents.append(img) |
|
logger.info(f"์ด๋ฏธ์ง #{idx} ์ถ๊ฐ๋จ") |
|
|
|
|
|
response = client.models.generate_content( |
|
model="gemini-2.0-flash-exp-image-generation", |
|
contents=contents, |
|
config=types.GenerateContentConfig( |
|
response_modalities=['Text', 'Image'], |
|
temperature=1, |
|
top_p=0.95, |
|
top_k=40, |
|
max_output_tokens=8192 |
|
) |
|
) |
|
|
|
|
|
with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp: |
|
temp_path = tmp.name |
|
|
|
result_text = "" |
|
image_found = False |
|
|
|
|
|
for part in response.candidates[0].content.parts: |
|
if hasattr(part, 'text') and part.text: |
|
result_text += part.text |
|
logger.info(f"์๋ต ํ
์คํธ: {part.text}") |
|
elif hasattr(part, 'inline_data') and part.inline_data: |
|
save_binary_file(temp_path, part.inline_data.data) |
|
image_found = True |
|
logger.info("์๋ต์์ ์ด๋ฏธ์ง ์ถ์ถ ์ฑ๊ณต") |
|
|
|
if not image_found: |
|
if retries < max_retries: |
|
retries += 1 |
|
logger.warning(f"์ด๋ฏธ์ง ์์ฑ ์คํจ, ์ฌ์๋ {retries}/{max_retries}") |
|
continue |
|
else: |
|
return None, f"API์์ ์ด๋ฏธ์ง๋ฅผ ์์ฑํ์ง ๋ชปํ์ต๋๋ค. ์๋ต ํ
์คํธ: {result_text}" |
|
|
|
|
|
result_img = Image.open(temp_path) |
|
if result_img.mode == "RGBA": |
|
result_img = result_img.convert("RGB") |
|
|
|
return result_img, f"์ด๋ฏธ์ง๊ฐ ์ฑ๊ณต์ ์ผ๋ก ์์ฑ๋์์ต๋๋ค. {result_text}" |
|
|
|
except Exception as e: |
|
if retries < max_retries: |
|
retries += 1 |
|
logger.warning(f"์ด๋ฏธ์ง ์์ฑ ์ค ์ค๋ฅ ๋ฐ์, ์ฌ์๋ {retries}/{max_retries}: {str(e)}") |
|
else: |
|
logger.exception("์ด๋ฏธ์ง ์์ฑ ์ค ์ค๋ฅ ๋ฐ์:") |
|
return None, f"์ค๋ฅ ๋ฐ์: {str(e)}" |
|
|
|
def process_images_with_prompt(image1, image2, image3, prompt):
    """Prepare a prompt for up to three images and run generation.

    A non-blank prompt is preprocessed (marker/shortcut expansion); a blank
    prompt is replaced by a default instruction chosen from how many images
    were actually supplied.
    """
    try:
        supplied = [img for img in (image1, image2, image3) if img is not None]

        if prompt and prompt.strip():
            # User wrote something: expand #N references / shortcuts.
            prompt = preprocess_prompt(prompt, image1, image2, image3)
        else:
            # Blank prompt: pick a default by image count (0..3).
            default_prompts = {
                0: ("์๋ฆ๋ค์ด ์์ฐ ํ๊ฒฝ ์ด๋ฏธ์ง๋ฅผ ์์ฑํด์ฃผ์ธ์. ์ฐ, ํธ์, ํ๋์ด ํฌํจ๋ ํํ๋ก์ด ์ฅ๋ฉด์ด๋ฉด ์ข๊ฒ ์ต๋๋ค.",
                    "์ด๋ฏธ์ง ์์, ๊ธฐ๋ณธ ์์ฑ ํ๋กฌํํธ ์ฌ์ฉ"),
                1: ("์ด ์ด๋ฏธ์ง๋ฅผ ์ฐฝ์์ ์ผ๋ก ๋ณํํด์ฃผ์ธ์. ๋ ์์ํ๊ณ ์์ ์ ์ธ ๋ฒ์ ์ผ๋ก ๋ง๋ค์ด์ฃผ์ธ์.",
                    "๋จ์ผ ์ด๋ฏธ์ง ํ๋กฌํํธ ์๋ ์์ฑ"),
                2: ("์ด ๋ ์ด๋ฏธ์ง๋ฅผ ์์ฐ์ค๋ฝ๊ฒ ํฉ์ฑํด์ฃผ์ธ์. ๋ ์ด๋ฏธ์ง์ ์์๋ฅผ ์กฐํ๋กญ๊ฒ ํตํฉํ์ฌ ํ๋์ ์ด๋ฏธ์ง๋ก ๋ง๋ค์ด์ฃผ์ธ์.",
                    "๋ ์ด๋ฏธ์ง ํฉ์ฑ ํ๋กฌํํธ ์๋ ์์ฑ"),
                3: ("์ด ์ธ ์ด๋ฏธ์ง๋ฅผ ์ฐฝ์์ ์ผ๋ก ํฉ์ฑํด์ฃผ์ธ์. ๋ชจ๋ ์ด๋ฏธ์ง์ ์ฃผ์ ์์๋ฅผ ํฌํจํ๋ ์์ฐ์ค๋ฝ๊ณ ์ผ๊ด๋ ํ๋์ ์ฅ๋ฉด์ผ๋ก ๋ง๋ค์ด์ฃผ์ธ์.",
                    "์ธ ์ด๋ฏธ์ง ํฉ์ฑ ํ๋กฌํํธ ์๋ ์์ฑ"),
            }
            prompt, note = default_prompts[len(supplied)]
            logger.info(note)

        return generate_with_images(prompt, supplied)

    except Exception as e:
        logger.exception("์ด๋ฏธ์ง ์ฒ๋ฆฌ ์ค ์ค๋ฅ ๋ฐ์:")
        return None, f"์ค๋ฅ ๋ฐ์: {str(e)}"
|
|
|
|
|
def update_prompt_from_function(function_choice):
    """Return the prompt template for a feature selected in the dropdown.

    Args:
        function_choice: The dropdown label, e.g. "3. ์ผ๊ตด๋ฐ๊พธ๊ธฐ".

    Returns:
        The matching "#N"-style template string, or "" when the choice is
        unknown (including the initial ``None`` dropdown value).
    """
    function_templates = {
        "1. ์ด๋ฏธ์ง ๋ณ๊ฒฝ": "#1์ ์ฐฝ์์ ์ผ๋ก ๋ณํํด์ฃผ์ธ์",
        # BUGFIX: the original literal contained a stray control character
        # in the middle of this word; normalized here.
        "2. ๊ธ์์ง์ฐ๊ธฐ": "#1์์ ํ์คํธ๋ฅผ ์ง์์ฃผ์ธ์",
        "3. ์ผ๊ตด๋ฐ๊พธ๊ธฐ": "#1์ ์ธ๋ฌผ์ #2์ ์ผ๊ตด๋ก ๋ฐ๊ฟ๋ผ",
        "4. ์ท๋ฐ๊พธ๊ธฐ": "#1์ ์ธ๋ฌผ์ #2 ๋๋ #3์ ์ท์ผ๋ก ๋ณ๊ฒฝํ๋ผ",
        "5. ๋ฐฐ๊ฒฝ๋ฐ๊พธ๊ธฐ": "#1์ ์ด๋ฏธ์ง์ #2์ ๋ฐฐ๊ฒฝ์ผ๋ก ์์ฐ์ค๋ฝ๊ฒ ๋ฐ๊ฟ๋ผ",
        "6. ์ด๋ฏธ์ง ํฉ์ฑ(์ํํฌํจ)": "#1์ #2 ๋๋ #3์ ๋ฅผ ํฉ์ฑํ๋ผ",
        "7. ์ด๋ฏธ์ง ํฉ์ฑ(์คํ์ผ์ ์ฉ)": "#1์ #2๋ฅผ ์คํ์ผ๋ก ๋ณํํ๋ผ"
    }

    return function_templates.get(function_choice, "")
|
|
|
|
|
def create_interface():
    """Build the Gradio Blocks UI and wire its event handlers.

    Layout: left column holds three optional input images (#1..#3), a
    feature dropdown that fills the prompt box with a template, the prompt
    textbox, and a submit button; right column shows the generated image,
    a status message, and the preprocessed prompt that was actually used.

    Returns:
        The assembled ``gr.Blocks`` app (not yet launched).
    """
    with gr.Blocks() as demo:
        # Page header.
        gr.HTML(
            """
            <div style="text-align: center; margin-bottom: 1rem;">
                <h1>๊ฐ๋จํ ์ด๋ฏธ์ง ์์ฑ๊ธฐ</h1>
                <p>์ด๋ฏธ์ง๋ฅผ ์๋ก๋ํ๊ฑฐ๋ ํ๋กฌํํธ๋ง ์๋ ฅํ์ฌ ์ด๋ฏธ์ง๋ฅผ ์์ฑํ ์ ์์ต๋๋ค.</p>
            </div>
            """
        )

        with gr.Row():
            with gr.Column():
                # Up to three optional reference images (#1, #2, #3).
                with gr.Row():
                    image1_input = gr.Image(type="pil", label="#1 (์ ํ์ฌํญ)", image_mode="RGB")
                    image2_input = gr.Image(type="pil", label="#2 (์ ํ์ฌํญ)", image_mode="RGB")
                    image3_input = gr.Image(type="pil", label="#3 (์ ํ์ฌํญ)", image_mode="RGB")

                # Feature picker: selecting an entry overwrites the prompt
                # box via update_prompt_from_function (wired below).
                with gr.Row():
                    function_dropdown = gr.Dropdown(
                        choices=[
                            "1. ์ด๋ฏธ์ง ๋ณ๊ฒฝ",
                            "2. ๊ธ์์ง์ฐ๊ธฐ",
                            "3. ์ผ๊ตด๋ฐ๊พธ๊ธฐ",
                            "4. ์ท๋ฐ๊พธ๊ธฐ",
                            "5. ๋ฐฐ๊ฒฝ๋ฐ๊พธ๊ธฐ",
                            "6. ์ด๋ฏธ์ง ํฉ์ฑ(์ํํฌํจ)",
                            "7. ์ด๋ฏธ์ง ํฉ์ฑ(์คํ์ผ์ ์ฉ)"
                        ],
                        label="๊ธฐ๋ฅ ์ ํ (์ ํํ๋ฉด ์๋์ผ๋ก ํ๋กฌํํธ๊ฐ ๋ณ๊ฒฝ๋ฉ๋๋ค)",
                        value=None
                    )

                prompt_input = gr.Textbox(
                    lines=3,
                    placeholder="ํ๋กฌํํธ๋ฅผ ์๋ ฅํ๊ฑฐ๋ ๋น์๋๋ฉด ์๋ ์์ฑ๋ฉ๋๋ค. ํ๊ตญ์ด๋ก ์๋ ฅํ์ธ์.",
                    label="ํ๋กฌํํธ"
                )

                submit_btn = gr.Button("์ด๋ฏธ์ง ์์ฑ", variant="primary")

            with gr.Column():
                output_image = gr.Image(label="์์ฑ๋ ์ด๋ฏธ์ง")
                output_text = gr.Textbox(label="์ํ ๋ฉ์์ง")
                # Echoes the prompt actually sent, after preprocessing.
                prompt_display = gr.Textbox(label="์ฌ์ฉ๋ ํ๋กฌํํธ", visible=True)

        # Dropdown selection -> prompt template.
        function_dropdown.change(
            fn=update_prompt_from_function,
            inputs=[function_dropdown],
            outputs=[prompt_input]
        )

        def process_and_show_prompt(image1, image2, image3, prompt):
            # Run generation, then recompute the preprocessed prompt purely
            # for display in prompt_display.
            # NOTE(review): preprocess_prompt is also called inside
            # process_images_with_prompt; this second call is display-only
            # and may differ when the prompt was blank (auto-generated).
            try:
                result_img, status = process_images_with_prompt(image1, image2, image3, prompt)

                processed_prompt = preprocess_prompt(prompt, image1, image2, image3)

                return result_img, status, processed_prompt
            except Exception as e:
                logger.exception("์ฒ๋ฆฌ ์ค ์ค๋ฅ ๋ฐ์:")
                return None, f"์ค๋ฅ ๋ฐ์: {str(e)}", prompt

        submit_btn.click(
            fn=process_and_show_prompt,
            inputs=[image1_input, image2_input, image3_input, prompt_input],
            outputs=[output_image, output_text, prompt_display],
        )

        # Usage help shown under the controls.
        gr.Markdown(
            """
            ### ์ฌ์ฉ ๋ฐฉ๋ฒ:

            1. **์ด๋ฏธ์ง ์์ด ์์ฑ**: ํ๋กฌํํธ๋ง ์๋ ฅํ์ฌ ์ด๋ฏธ์ง๋ฅผ ์์ฑํ ์ ์์ต๋๋ค

            2. **์๋ ํฉ์ฑ**: ์ด๋ฏธ์ง๋ง ์๋ก๋ํ๊ณ ํ๋กฌํํธ๋ฅผ ๋น์๋๋ฉด ์๋์ผ๋ก ํฉ์ฑ๋ฉ๋๋ค

            3. **๊ธฐ๋ฅ ์ฌ์ฉ**: ๋๋กญ๋ค์ด์์ ์ํ๋ ๊ธฐ๋ฅ์ ์ ํํ๋ฉด ์๋์ผ๋ก ํ๋กฌํํธ๊ฐ ๋ณ๊ฒฝ๋ฉ๋๋ค

            4. **์ด๋ฏธ์ง ์ฐธ์กฐ**: #1, #2, #3์ผ๋ก ๊ฐ ์ด๋ฏธ์ง๋ฅผ ์ฐธ์กฐํ ์ ์์ต๋๋ค

            5. **์ผ๋ถ ์ด๋ฏธ์ง๋ง**: ํ์ํ ์ด๋ฏธ์ง๋ง ์๋ก๋ํด๋ ๊ธฐ๋ฅ ์คํ์ด ๊ฐ๋ฅํฉ๋๋ค

            > **ํ**: ํ๊ตญ์ด๋ก ์๋ ฅํ ํ๋กฌํํธ๋ ์๋์ผ๋ก ์์ด๋ก ๋ฒ์ญ๋์ด ์ฒ๋ฆฌ๋ฉ๋๋ค
            """
        )

    return demo
|
|
|
|
|
if __name__ == "__main__":
    # Build the UI and serve it with a public Gradio share link.
    create_interface().launch(share=True)