import spaces
import gradio as gr
import torch
from PIL import Image
from diffusers import DiffusionPipeline
import random
from transformers import pipeline

torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cuda.matmul.allow_tf32 = True

# Initialize the Korean-to-English translation model
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
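# Illustrative usage (a sketch, not executed at import time): the transformers translation
# pipeline returns a list of dicts, so the English text for a Korean input is read as
#   translated_text = translator(korean_prompt)[0]['translation_text']
# where `korean_prompt` stands in for any Korean string (placeholder name for this example).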

# Base model and LoRA repositories
base_model = "black-forest-labs/FLUX.1-dev"
model_lora_repo = "Motas/Flux_Fashion_Photography_Style"  # fashion model LoRA
clothes_lora_repo = "prithivMLmods/Canopus-Clothing-Flux-LoRA"  # clothing LoRA

pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
pipe.to("cuda")

MAX_SEED = 2**32-1

# Example prompts
model_examples = [
    "professional fashion model wearing elegant black dress in studio lighting",
    "fashion model in casual street wear, urban background",
    "high fashion model in avant-garde outfit on runway"
]

clothes_examples = [
    "luxurious red evening gown with detailed embroidery",
    "casual denim jacket with vintage wash",
    "modern minimalist white blazer with clean lines"
]

@spaces.GPU()
def generate_fashion(prompt, mode, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
    # Detect Korean characters and translate the prompt to English if needed
    def contains_korean(text):
        return any(ord('๊ฐ€') <= ord(char) <= ord('ํžฃ') for char in text)
    
    if contains_korean(prompt):
        translated = translator(prompt)[0]['translation_text']
        actual_prompt = translated
    else:
        actual_prompt = prompt

    # Select the LoRA weights and trigger words for the chosen mode.
    # Unload any previously loaded LoRA first so repeated calls do not stack adapters.
    pipe.unload_lora_weights()
    if mode == "ํŒจ์…˜ ๋ชจ๋ธ ์ƒ์„ฑ":
        pipe.load_lora_weights(model_lora_repo)
        trigger_word = "fashion photography, professional model"
    else:
        pipe.load_lora_weights(clothes_lora_repo)
        trigger_word = "upper clothing, fashion item"

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(seed)

    progress(0, "Starting fashion generation...")

    # Per-step progress is surfaced automatically via gr.Progress(track_tqdm=True),
    # which mirrors the diffusion pipeline's internal tqdm bar in the Gradio UI.

    image = pipe(
        prompt=f"{actual_prompt} {trigger_word}",
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]

    progress(1.0, "Completed!")  # gr.Progress expects a fraction between 0 and 1
    return image, seed
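
# Illustrative direct call (a sketch with assumed argument values; in the app this function
# is only invoked through the Gradio click handler defined below):
#   image, used_seed = generate_fashion(
#       prompt="professional fashion model wearing elegant black dress in studio lighting",
#       mode="ํŒจ์…˜ ๋ชจ๋ธ ์ƒ์„ฑ", cfg_scale=7.0, steps=30, randomize_seed=True, seed=0,
#       width=512, height=768, lora_scale=0.85,
#   )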

def update_examples(mode):
    # Swap the example prompts when the mode changes. gr.Examples itself is not an
    # updatable output, so return an update for its underlying gr.Dataset instead.
    if mode == "ํŒจ์…˜ ๋ชจ๋ธ ์ƒ์„ฑ":
        return gr.Dataset(samples=[[e] for e in model_examples])
    else:
        return gr.Dataset(samples=[[e] for e in clothes_examples])

with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange") as app:
    gr.Markdown("# ๐ŸŽญ Fashion AI Studio")
    gr.Markdown("AI๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ํŒจ์…˜ ๋ชจ๋ธ๊ณผ ์˜๋ฅ˜๋ฅผ ์ƒ์„ฑํ•ด๋ณด์„ธ์š”")
    
    with gr.Column():
        # Mode selection
        mode = gr.Radio(
            choices=["ํŒจ์…˜ ๋ชจ๋ธ ์ƒ์„ฑ", "ํŒจ์…˜ ์˜๋ฅ˜ ์ƒ์„ฑ"],
            label="์ƒ์„ฑ ๋ชจ๋“œ",
            value="ํŒจ์…˜ ๋ชจ๋ธ ์ƒ์„ฑ"
        )
        
        # Prompt input
        prompt = gr.TextArea(
            label="โœ๏ธ ํŒจ์…˜ ์„ค๋ช… (ํ•œ๊ธ€ ๋˜๋Š” ์˜์–ด)",
            placeholder="ํŒจ์…˜ ๋ชจ๋ธ์ด๋‚˜ ์˜๋ฅ˜๋ฅผ ์„ค๋ช…ํ•˜์„ธ์š”...",
            lines=3
        )
        
        # Example prompts section
        with gr.Column() as example_container:
            examples = gr.Examples(
                examples=model_examples,
                inputs=prompt,
                label="์˜ˆ์‹œ ํ”„๋กฌํ”„ํŠธ"
            )
        
        # Generated image output
        result = gr.Image(label="์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€")
        
        generate_button = gr.Button("๐Ÿš€ ์ด๋ฏธ์ง€ ์ƒ์„ฑ")
        
        # Advanced settings
        with gr.Accordion("๐ŸŽจ ๊ณ ๊ธ‰ ์„ค์ •", open=False):
            with gr.Row():
                cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7.0)
                steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=30)
                lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, value=0.85)
            
            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=1536, value=512)
                height = gr.Slider(label="Height", minimum=256, maximum=1536, value=768)
            
            with gr.Row():
                randomize_seed = gr.Checkbox(True, label="์‹œ๋“œ ๋žœ๋คํ™”")
                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=42)

    # Event handlers
    mode.change(
        fn=update_examples,
        inputs=[mode],
        outputs=[examples.dataset]
    )
    
    generate_button.click(
        generate_fashion,
        inputs=[prompt, mode, cfg_scale, steps, randomize_seed, seed, width, height, lora_scale],
        outputs=[result, seed]
    )

if __name__ == "__main__":
    app.launch(share=True)