ginipick commited on
Commit
e6920c4
·
verified ·
1 Parent(s): 96169f0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -2
app.py CHANGED
@@ -22,6 +22,14 @@ from diffusers import FluxPipeline
22
  # 상단에 import 추가
23
  from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
24
 
 
 
 
 
 
 
 
 
25
 
26
  model_name = "Helsinki-NLP/opus-mt-ko-en"
27
  tokenizer = AutoTokenizer.from_pretrained(model_name)
@@ -323,10 +331,12 @@ def on_change_prompt(img: Image.Image | None, prompt: str | None, bg_prompt: str
323
  return gr.update(interactive=bool(img and prompt))
324
 
325
 
 
326
  def process_prompt(img: Image.Image, prompt: str, bg_prompt: str | None = None,
327
  aspect_ratio: str = "1:1", position: str = "bottom-center",
328
  scale_percent: float = 100) -> tuple[Image.Image, Image.Image]:
329
  try:
 
330
  if img is None or prompt.strip() == "":
331
  raise gr.Error("Please provide both image and prompt")
332
 
@@ -362,6 +372,9 @@ def process_prompt(img: Image.Image, prompt: str, bg_prompt: str | None = None,
362
  print(f"Error in process_prompt: {str(e)}")
363
  raise gr.Error(str(e))
364
 
 
 
 
365
  def process_bbox(img: Image.Image, box_input: str) -> tuple[Image.Image, Image.Image]:
366
  try:
367
  if img is None or box_input.strip() == "":
@@ -667,5 +680,12 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
667
  </div>
668
  </div>
669
  """)
670
- demo.queue(max_size=30, api_open=False)
671
- demo.launch()
 
 
 
 
 
 
 
 
22
  # 상단에 import 추가
23
  from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
24
 
25
import gc


def clear_memory():
    """Release host and GPU memory between heavy pipeline runs.

    Forces a garbage-collection pass so unreferenced tensors are dropped,
    then returns cached CUDA allocator blocks to the driver. Called before
    and after image processing to keep peak memory bounded.

    Returns:
        None.
    """
    gc.collect()
    # empty_cache() raises when no CUDA runtime is present — guard so the
    # app still runs on CPU-only hosts.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
33
 
34
  model_name = "Helsinki-NLP/opus-mt-ko-en"
35
  tokenizer = AutoTokenizer.from_pretrained(model_name)
 
331
  return gr.update(interactive=bool(img and prompt))
332
 
333
 
334
+
335
  def process_prompt(img: Image.Image, prompt: str, bg_prompt: str | None = None,
336
  aspect_ratio: str = "1:1", position: str = "bottom-center",
337
  scale_percent: float = 100) -> tuple[Image.Image, Image.Image]:
338
  try:
339
+ clear_memory() # 처리 전 메모리 정리
340
  if img is None or prompt.strip() == "":
341
  raise gr.Error("Please provide both image and prompt")
342
 
 
372
  print(f"Error in process_prompt: {str(e)}")
373
  raise gr.Error(str(e))
374
 
375
+ finally:
376
+ clear_memory() # 처리 후 메모리 정리
377
+
378
  def process_bbox(img: Image.Image, box_input: str) -> tuple[Image.Image, Image.Image]:
379
  try:
380
  if img is None or box_input.strip() == "":
 
680
  </div>
681
  </div>
682
  """)
683
# Cap the request queue so bursts of traffic don't exhaust GPU memory.
demo.queue(max_size=20)
# NOTE: queueing is enabled by the demo.queue() call above; the old
# `enable_queue=True` launch kwarg was deprecated in Gradio 3.x and
# removed in 4.x (passing it raises TypeError), so it is dropped here.
demo.launch(
    server_name="0.0.0.0",   # listen on all interfaces (container-friendly)
    server_port=7860,
    share=False,
    max_threads=4,           # limit worker threads to bound memory use
    allowed_paths=["examples"],
)