Update app.py
app.py CHANGED
@@ -208,7 +208,11 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
         return None, None

     try:
-
+        # Clear CUDA memory
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+            torch.cuda.synchronize()
+        gc.collect()

         if randomize_seed:
             seed = np.random.randint(0, MAX_SEED)
@@ -231,58 +235,77 @@ def image_to_3d(trial_id: str, seed: int, randomize_seed: bool, ss_guidance_stre
         image = image.resize(new_size, Image.LANCZOS)
         print(f"Resized image to: {image.size}")

-        [45 removed lines not shown]
+        # Start the GPU work
+        with torch.inference_mode():
+            try:
+                # Move the model to the GPU
+                g.trellis_pipeline.to('cuda')
+                torch.cuda.synchronize()
+
+                # Generate the 3D output
+                outputs = g.trellis_pipeline.run(
+                    image,
+                    seed=seed,
+                    formats=["gaussian", "mesh"],
+                    preprocess_image=False,
+                    sparse_structure_sampler_params={
+                        "steps": min(ss_sampling_steps, 12),
+                        "cfg_strength": ss_guidance_strength,
+                    },
+                    slat_sampler_params={
+                        "steps": min(slat_sampling_steps, 12),
+                        "cfg_strength": slat_guidance_strength,
+                    },
+                )
+                torch.cuda.synchronize()
+
+                # Render the videos
+                video = render_utils.render_video(
+                    outputs['gaussian'][0],
+                    num_frames=60,
+                    resolution=512
+                )['color']
+                torch.cuda.synchronize()
+
+                video_geo = render_utils.render_video(
+                    outputs['mesh'][0],
+                    num_frames=60,
+                    resolution=512
+                )['normal']
+                torch.cuda.synchronize()
+
+                # Move the data to the CPU
+                video = [v.cpu().numpy() if torch.is_tensor(v) else v for v in video]
+                video_geo = [v.cpu().numpy() if torch.is_tensor(v) else v for v in video_geo]
+
+                video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
+                new_trial_id = str(uuid.uuid4())
+                video_path = f"{TMP_DIR}/{new_trial_id}.mp4"
+                os.makedirs(os.path.dirname(video_path), exist_ok=True)
+                imageio.mimsave(video_path, video, fps=15)
+
+                # Save the state
+                state = pack_state(outputs['gaussian'][0], outputs['mesh'][0], new_trial_id)
+
+                return state, video_path
+
+            finally:
+                # Cleanup
+                g.trellis_pipeline.to('cpu')
+                if torch.cuda.is_available():
+                    torch.cuda.empty_cache()
+                    torch.cuda.synchronize()
+                gc.collect()
+
     except Exception as e:
         print(f"Error in image_to_3d: {str(e)}")
-        return None, None
-    finally:
         if hasattr(g.trellis_pipeline, 'to'):
             g.trellis_pipeline.to('cpu')
-
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+            torch.cuda.synchronize()
+        gc.collect()
+        return None, None

 def clear_gpu_memory():
     """Utility function that clears GPU memory."""
@@ -326,7 +349,11 @@ def deactivate_button() -> gr.Button:
 @spaces.GPU
 def text_to_image(prompt: str, height: int, width: int, steps: int, scales: float, seed: int) -> Image.Image:
     try:
-
+        # Clear CUDA memory
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+            torch.cuda.synchronize()
+        gc.collect()

         # Detect Korean text and translate it
         def contains_korean(text):
@@ -343,7 +370,7 @@ def text_to_image(prompt: str, height: int, width: int, steps: int, scales: floa
         width = min(width, 512)
         steps = min(steps, 12)

-        with
+        with torch.inference_mode():
             generated_image = g.flux_pipe(
                 prompt=[formatted_prompt],
                 generator=torch.Generator('cuda').manual_seed(int(seed)),
@@ -368,7 +395,10 @@ def text_to_image(prompt: str, height: int, width: int, steps: int, scales: floa
         print(f"Error in image generation: {str(e)}")
         return None
     finally:
-
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+            torch.cuda.synchronize()
+        gc.collect()

 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     gr.Markdown("""## Craft3D""")