Update app.py
app.py CHANGED
@@ -375,37 +375,31 @@ def on_change_prompt(img: Image.Image | None, prompt: str | None, bg_prompt: str
     return gr.update(interactive=bool(img and prompt))
 
 
-@spaces.GPU(
+@spaces.GPU()
 def process_prompt(img: Image.Image, prompt: str, bg_prompt: str | None = None,
                    aspect_ratio: str = "1:1", position: str = "bottom-center",
-                   scale_percent: float = 100, text_params: dict | None = None)
+                   scale_percent: float = 100, text_params: dict | None = None):
     try:
-        #
-
-
-        with torch.cuda.amp.autocast():  # use automatic mixed precision
-            results, _ = _process(img, prompt, bg_prompt, aspect_ratio)
-
-        if bg_prompt:
-            combined = combine_with_background(
-                foreground=results[2],
-                background=results[1],
-                position=position,
-                scale_percent=scale_percent
-            )
-
-            if text_params and text_params.get('text'):
-                combined = add_text_to_image(combined, text_params)
-
-            return combined, results[2]
-
-        return results[1], results[2]
-
-    except Exception as e:
-        print(f"Error in process_prompt: {str(e)}")
-        raise gr.Error(str(e))
+        # GPU setup
+        if torch.cuda.is_available():
+            device = torch.device('cuda')
+            # move the models to the GPU
+            gd_model.to(device)
+            segmenter.to(device)
+            pipe.to(device)
+        else:
+            device = torch.device('cpu')
+
+        # rest of the processing logic...
+
     finally:
-
+        # clean up GPU memory
+        if torch.cuda.is_available():
+            try:
+                with torch.cuda.device('cuda'):
+                    torch.cuda.empty_cache()
+            except Exception as e:
+                print(f"GPU cleanup warning: {e}")
 
 
 def process_bbox(img: Image.Image, box_input: str) -> tuple[Image.Image, Image.Image]:
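
Note on this hunk: as committed, the new `process_prompt` ends at the `# rest of the processing logic...` placeholder, so the function only sets up devices, cleans up, and returns None; with the `except` block gone, errors now propagate to the caller after cleanup. As a reading aid only, here is a minimal sketch of how the new GPU setup could combine with the body this commit removed. It is an assumption about intent, not the file's contents, and `gd_model`, `segmenter`, `pipe`, `_process`, `combine_with_background`, and `add_text_to_image` are taken to be defined earlier in app.py.

import gradio as gr
import spaces
import torch
from PIL import Image

@spaces.GPU()  # ZeroGPU: a GPU is attached only while this call runs
def process_prompt(img: Image.Image, prompt: str, bg_prompt: str | None = None,
                   aspect_ratio: str = "1:1", position: str = "bottom-center",
                   scale_percent: float = 100, text_params: dict | None = None):
    try:
        # New in this commit: pick a device and move the models onto it.
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        gd_model.to(device)   # assumed: grounding model defined earlier in app.py
        segmenter.to(device)  # assumed: segmentation model defined earlier in app.py
        pipe.to(device)       # assumed: diffusion pipeline defined earlier in app.py

        # Processing body reused from the version this commit removed.
        results, _ = _process(img, prompt, bg_prompt, aspect_ratio)
        if bg_prompt:
            combined = combine_with_background(
                foreground=results[2], background=results[1],
                position=position, scale_percent=scale_percent)
            if text_params and text_params.get('text'):
                combined = add_text_to_image(combined, text_params)
            return combined, results[2]
        return results[1], results[2]
    except Exception as e:
        print(f"Error in process_prompt: {str(e)}")
        raise gr.Error(str(e))
    finally:
        # Release cached CUDA memory once the request finishes; the extra
        # `with torch.cuda.device('cuda'):` from the commit is not required.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
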
@@ -810,6 +804,7 @@ if __name__ == "__main__":
         queue=True
     )
 
+    # adjusted Gradio launch settings
     demo.launch(
         server_name="0.0.0.0",
         server_port=7860,
@@ -818,7 +813,5 @@ demo.launch(
         enable_queue=True,
         cache_examples=False,
         show_error=True,
-        show_tips=False
-        max_size=1,  # limit the queue size
-        memory_limit="48Gi"  # set a memory limit
+        show_tips=False
     )
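
Note on the last hunk: the removed `show_tips=False` line had no trailing comma before `max_size=1`, which is a syntax error, and `max_size` and `memory_limit` are not `demo.launch()` parameters in Gradio anyway (queue size is set on `demo.queue()`, and container memory is a hosting-side setting), so dropping them is the right fix. A minimal sketch of an equivalent, valid configuration, assuming Gradio 3.x (`enable_queue` and `show_tips` were removed in Gradio 4):

demo.queue(max_size=1)      # cap pending requests; replaces max_size= in launch()
demo.launch(
    server_name="0.0.0.0",  # listen on all interfaces
    server_port=7860,
    show_error=True,        # surface Python errors in the UI
)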