Update app.py
app.py CHANGED
@@ -71,19 +71,23 @@ def translate_to_english(text: str) -> str:
         print(f"Translation error: {str(e)}")
         return text
 
+# Modified FLUX pipeline initialization
 print("Initializing FLUX pipeline...")
 try:
     pipe = FluxPipeline.from_pretrained(
         "black-forest-labs/FLUX.1-dev",
-        torch_dtype=torch.float16,
+        torch_dtype=torch.float16,  # use half precision
         use_auth_token=HF_TOKEN,
-        safety_checker=None
+        safety_checker=None,
+        variant="fp16",  # use the fp16 variant of the weights
+        device_map="auto"  # automatic device mapping
     )
     print("FLUX pipeline initialized successfully")
 
-    # Memory optimization settings
-    pipe.enable_attention_slicing(slice_size=
+    # Stronger memory optimization settings
+    pipe.enable_attention_slicing(slice_size=1)  # smaller slice size
     pipe.enable_model_cpu_offload()  # enable CPU offloading
+    pipe.enable_sequential_cpu_offload()  # sequential CPU offloading
     print("Pipeline optimization settings applied")
 
 except Exception as e:
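A note on the new arguments, since a few of them interact: in diffusers, enable_model_cpu_offload() and enable_sequential_cpu_offload() are alternative offload strategies rather than complementary ones, offloading is generally incompatible with passing device_map at load time, and FluxPipeline has no safety_checker component, so that kwarg is likely ignored with a warning. A minimal sketch of a non-conflicting initialization, assuming a recent diffusers release and the HF_TOKEN defined earlier in app.py:

import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.float16,  # half precision halves weight memory
    token=HF_TOKEN,             # newer diffusers name for use_auth_token
)
pipe.enable_attention_slicing(slice_size=1)  # lower peak memory at some speed cost
pipe.enable_sequential_cpu_offload()         # the more aggressive of the two offload modes

With sequential offload enabled, diffusers manages device placement itself, which is consistent with this commit deleting the manual pipe.to("cuda:0") block in the next hunk.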
@@ -93,34 +97,28 @@ except Exception as e:
 # Modified LoRA weight loading
 print("Loading LoRA weights...")
 try:
-    # Check the LoRA file path
     lora_path = hf_hub_download(
         repo_id="openfree/myt-flux-fantasy",
-        filename="myt-flux-fantasy.safetensors",
+        filename="myt-flux-fantasy.safetensors",
         use_auth_token=HF_TOKEN
     )
     print(f"LoRA weights downloaded to: {lora_path}")
 
-    # Load LoRA weights
-    pipe.load_lora_weights(lora_path)
+    # Load LoRA weights (memory-efficient approach)
+    pipe.load_lora_weights(lora_path, adapter_name="fantasy")
     pipe.fuse_lora(lora_scale=0.125)
+
+    # Free unneeded memory
+    torch.cuda.empty_cache()
+    gc.collect()
+
     print("LoRA weights loaded and fused successfully")
 
 except Exception as e:
     print(f"Error loading LoRA weights: {str(e)}")
-
-
+    raise ValueError("Failed to load LoRA weights")
+
 
-# Modified GPU move
-if torch.cuda.is_available():
-    try:
-        print("Moving pipeline to GPU...")
-        pipe = pipe.to("cuda:0")
-        print("Pipeline successfully moved to GPU")
-        print(f"Current device: {pipe.device}")
-    except Exception as e:
-        print(f"Warning: Could not move pipeline to CUDA: {str(e)}")
-        print("Falling back to CPU")
 
 
 # Save directory settings
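For context, fuse_lora(lora_scale=0.125) bakes the adapter into the base weights, scaling the low-rank delta: W_fused = W + 0.125 * (B @ A). A hedged sketch of the load/fuse/cleanup sequence above, assuming torch and gc are imported at the top of app.py as the diff implies; the unload_lora_weights() call is an optional extra step that drops the now-redundant unfused adapter copies:

pipe.load_lora_weights(lora_path, adapter_name="fantasy")  # register the adapter
pipe.fuse_lora(lora_scale=0.125)  # merge: W <- W + 0.125 * (B @ A)
pipe.unload_lora_weights()        # optional: free the unfused adapter tensors
torch.cuda.empty_cache()          # release cached CUDA blocks
gc.collect()                      # collect Python-side garbage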
@@ -139,6 +137,7 @@ def save_generated_image(image, prompt):
     image.save(filepath)
     return filepath
 
+# Modified generate_image function
 @spaces.GPU(duration=60)
 def generate_image(
     prompt: str,
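@spaces.GPU(duration=60) is the ZeroGPU decorator from the spaces package on Hugging Face Spaces: a GPU is attached to the process only while the decorated function runs, and duration caps the per-call allocation, here at roughly 60 seconds. A minimal self-contained sketch of the pattern; probe is a hypothetical function used only for illustration:

import spaces
import torch

@spaces.GPU(duration=60)  # request a GPU for at most ~60 s per call
def probe() -> str:
    # CUDA is only guaranteed to be available inside the decorated function
    return torch.cuda.get_device_name(0)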
@@ -153,18 +152,19 @@ def generate_image(
     try:
         print(f"\nStarting image generation with prompt: {prompt}")
 
-        #
+        # Memory cleanup
+        clear_memory()
+
         translated_prompt = translate_to_english(prompt)
         print(f"Translated prompt: {translated_prompt}")
 
         if randomize_seed:
             seed = random.randint(0, MAX_SEED)
-        print(f"Using seed: {seed}")
 
         generator = torch.Generator(device=device).manual_seed(seed)
 
-
-        with torch.inference_mode():
+        # Fix the batch size at 1 to minimize memory use
+        with torch.inference_mode(), torch.cuda.amp.autocast():
             image = pipe(
                 prompt=translated_prompt,
                 width=width,
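Two notes on the new with line: torch.inference_mode() disables autograd tracking entirely, and torch.cuda.amp.autocast() is the legacy spelling that recent PyTorch deprecates in favor of torch.amp.autocast("cuda"); with the weights already in float16, autocast adds little either way. A sketch of the call under those assumptions, reusing names defined above (pipe, translated_prompt, device, seed):

generator = torch.Generator(device=device).manual_seed(seed)
with torch.inference_mode(), torch.amp.autocast("cuda"):
    image = pipe(
        prompt=translated_prompt,
        generator=generator,
        num_images_per_prompt=1,  # a batch of one keeps peak VRAM low
    ).images[0]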
@@ -172,20 +172,20 @@ def generate_image(
                 num_inference_steps=num_inference_steps,
                 guidance_scale=guidance_scale,
                 generator=generator,
+                num_images_per_prompt=1,
             ).images[0]
-        print("Image generation completed successfully")
 
         filepath = save_generated_image(image, translated_prompt)
-
+
+        # Memory cleanup
+        clear_memory()
 
         return image, seed
 
     except Exception as e:
         print(f"Error in generate_image: {str(e)}")
-        print(f"Full error details: {repr(e)}")
-        raise gr.Error(f"Image generation failed: {str(e)}")
-    finally:
         clear_memory()
+        raise gr.Error(f"Image generation failed: {str(e)}")
 
 def add_text_with_stroke(draw, text, x, y, font, text_color, stroke_width):
     """Adds an outline stroke to the text"""
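The commit also drops the old finally: block and instead calls clear_memory() explicitly on both the success and failure paths, re-raising as gr.Error only after cleanup. A sketch of the equivalent finally:-based shape, which guarantees cleanup even if a future code path forgets the explicit call; run_pipeline is a hypothetical stand-in for the pipe(...) invocation:

try:
    image = run_pipeline()  # hypothetical stand-in for the pipe(...) call
    filepath = save_generated_image(image, translated_prompt)
    return image, seed
except Exception as e:
    raise gr.Error(f"Image generation failed: {str(e)}")
finally:
    clear_memory()  # runs on success and on failure alike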