import gradio as gr
import torch
# Load a Korean -> English translation pipeline via the Transformers pipeline API
from transformers import pipeline as translation_pipeline

translator = translation_pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device="cpu")
# Load the Diffusers pipeline
from diffusers import DiffusionPipeline

# -- Stable Diffusion-family pipeline setup --
# (Example model: black-forest-labs/FLUX.1-schnell; "storyboard"-style prompts
#  such as the example shown in the UI below work fine with it)
model_id = "black-forest-labs/FLUX.1-schnell"
pipe = DiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float32
).to("cpu")  # run on CPU
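# Note (deployment assumption, not part of the original setup): the demo is pinned to
# CPU/float32 for portability. On a CUDA machine the usual pattern would be
#   pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16).to("cuda")
# which is dramatically faster for a model of this size.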
# Helper: translate a Korean prompt to English
def translate_prompt_if_korean(prompt_text: str) -> str:
    # Simple heuristic: if the string contains any Hangul syllables, translate it
    # (translation is skipped for English-only input)
    if any("\uac00" <= ch <= "\ud7a3" for ch in prompt_text):  # Hangul syllable range (가-힣)
        result = translator(prompt_text)
        return result[0]['translation_text']
    return prompt_text
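# Illustrative only (the exact output is an assumption about what the MT model returns):
#   translate_prompt_if_korean("안녕하세요")   -> an English rendering such as "Hello."
#   translate_prompt_if_korean("A dark alley") -> "A dark alley" (returned unchanged)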
def generate_storyboard(
    prompt,
    width=768,
    height=512,
    num_inference_steps=10,
    guidance_scale=7.5,
    seed=42
):
    # Translate the prompt if it is Korean (Korean -> English)
    prompt_en = translate_prompt_if_korean(prompt)

    # Fix the random seed for reproducible results
    generator = torch.Generator(device="cpu").manual_seed(seed)

    # Generate the image
    with torch.autocast("cpu"):
        result = pipe(
            prompt=prompt_en,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            generator=generator
        ).images[0]

    return result
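# Minimal standalone usage sketch (not executed by the app; assumes the model weights
# are available locally and that slow CPU inference is acceptable):
#
#   image = generate_storyboard(
#       "A hand-drawn storyboard style, film noir theme, black and white. "
#       "SCENE 1: A detective enters a dark alley [Frame 1]",
#       width=768, height=512, num_inference_steps=4, seed=123,
#   )
#   image.save("storyboard.png")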
# --- CSS for a clean, polished UI ---
custom_css = """
#title {
    text-align: center;
    font-size: 3em;
    font-weight: bold;
    margin: 20px 0;
    color: #333;
}
#subtitle {
    text-align: center;
    color: #666;
    margin-bottom: 30px;
    font-size: 1.2em;
}
.gradio-container {
    background: linear-gradient(120deg, #f8f8f8 0%, #ffffff 100%);
}
.input-panel, .output-panel {
    background: white;
    border-radius: 12px;
    padding: 20px;
    box-shadow: 0 2px 10px rgba(0,0,0,0.1);
}
#prompt-input {
    font-size: 14px !important;
    min-height: 140px !important;
}
.advanced-settings {
    font-size: 0.9em;
    color: #444;
}
.example-box {
    background: #f9f9f9;
    padding: 10px;
    margin-top: 10px;
    border-radius: 8px;
}
"""
# --- Gradio interface layout ---
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("<div id='title'>Gini Storyboard</div>")
    gr.Markdown("<div id='subtitle'>Generate a hand-drawn style storyboard in black & white film noir or any style you wish!</div>")
    with gr.Row():
        with gr.Column(elem_classes="input-panel", scale=1):
            prompt = gr.Textbox(
                label="Storyboard Prompt",
                placeholder="Enter your scene descriptions here (in English or Korean)",
                lines=8,
                elem_id="prompt-input"
            )
            seed = gr.Slider(
                label="Seed",
                value=42,
                minimum=0,
                maximum=999999,
                step=1
            )
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=1280,
                    value=768,
                    step=64
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=1280,
                    value=512,
                    step=64
                )
            with gr.Accordion("Advanced Settings", open=False, elem_classes="advanced-settings"):
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    value=10,
                    minimum=1,
                    maximum=50,
                    step=1
                )
                guidance_scale = gr.Slider(
                    label="Guidance Scale",
                    value=7.5,
                    minimum=0.0,
                    maximum=20.0,
                    step=0.5
                )
            run_button = gr.Button("Generate Storyboard", variant="primary")
        with gr.Column(elem_classes="output-panel", scale=1):
            result = gr.Image(label="Storyboard Result")
            # Example prompt
            gr.Markdown("### Example Prompt")
            # gr.Box was removed in Gradio 4.x; gr.Group is the closest replacement
            with gr.Group(elem_classes="example-box"):
                example_text = (
                    "A hand-drawn storyboard style, film noir theme, black and white.\n"
                    "SCENE 1: A detective enters a dark alley [Frame 1]\n"
                    "SCENE 2: He notices a shadow [Frame 2]\n"
                    "SCENE 3: A sudden flash of light reveals a clue [Frame 3]"
                )
                gr.Markdown(f"```\n{example_text}\n```")
                example_button = gr.Button("Use Example")

    # Populate the prompt box when the example button is clicked
    def load_example():
        return example_text

    example_button.click(fn=load_example, outputs=[prompt])
    # Wire up the button click and the prompt's Enter/submit event
    run_button.click(
        fn=generate_storyboard,
        inputs=[prompt, width, height, num_inference_steps, guidance_scale, seed],
        outputs=[result]
    )
    prompt.submit(
        fn=generate_storyboard,
        inputs=[prompt, width, height, num_inference_steps, guidance_scale, seed],
        outputs=[result]
    )
# Launch the app
if __name__ == "__main__":
    demo.queue()
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
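# To run locally (assuming the dependencies above are installed):
#   python app.py        # "app.py" is an assumption; use this script's actual filename
# The Gradio UI is then served on port 7860, as configured in demo.launch above.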