ginipick committed on
Commit
ec0ff5f
·
verified ·
1 Parent(s): 3bee602

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -13
app.py CHANGED
@@ -24,18 +24,23 @@ import gc
24
  def clear_memory():
25
  """메모리 정리 함수"""
26
  gc.collect()
27
- if torch.cuda.is_available():
28
- try:
29
- torch.cuda.empty_cache()
30
- except:
31
- pass
 
 
 
 
32
 
33
  # GPU 설정을 try-except로 감싸기
34
  if torch.cuda.is_available():
35
  try:
36
- torch.cuda.empty_cache()
37
- torch.backends.cudnn.benchmark = True
38
- torch.backends.cuda.matmul.allow_tf32 = True
 
39
  except:
40
  print("Warning: Could not configure CUDA settings")
41
 
@@ -62,8 +67,6 @@ BoundingBox = tuple[int, int, int, int]
62
  pillow_heif.register_heif_opener()
63
  pillow_heif.register_avif_opener()
64
 
65
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
66
-
67
  # HF 토큰 설정
68
  HF_TOKEN = os.getenv("HF_TOKEN")
69
  if HF_TOKEN is None:
@@ -106,9 +109,9 @@ pipe.fuse_lora(lora_scale=0.125)
106
  # GPU 설정을 try-except로 감싸기
107
  try:
108
  if torch.cuda.is_available():
109
- pipe.to("cuda")
110
- except:
111
- print("Warning: Could not move pipeline to CUDA")
112
 
113
  class timer:
114
  def __init__(self, method_name="timed process"):
 
24
def clear_memory():
    """Free Python garbage and, when a GPU is available, the CUDA cache.

    Best-effort: CUDA errors are swallowed so cleanup never crashes the app.
    Returns None.
    """
    gc.collect()
    try:
        if torch.cuda.is_available():
            # Pin to device 0 explicitly so we clear the cache of the same
            # device the pipeline runs on.
            with torch.cuda.device(0):
                torch.cuda.empty_cache()
    except Exception:
        # except Exception (not bare except): a bare except would also swallow
        # KeyboardInterrupt/SystemExit, preventing clean shutdown.
        pass
33
+
34
# GPU setup: pin to cuda:0 explicitly so every later .to(...) call targets
# the same device.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Wrap CUDA configuration in try/except so a misconfigured driver warns
# instead of aborting startup.
if torch.cuda.is_available():
    try:
        with torch.cuda.device(0):
            torch.cuda.empty_cache()
        # cudnn.benchmark autotunes conv kernels; TF32 speeds up matmul on
        # Ampere+ GPUs. Both are global, device-independent flags.
        torch.backends.cudnn.benchmark = True
        torch.backends.cuda.matmul.allow_tf32 = True
    except Exception:
        # except Exception (not bare except): do not trap
        # KeyboardInterrupt/SystemExit during startup.
        print("Warning: Could not configure CUDA settings")
46
 
 
67
  pillow_heif.register_heif_opener()
68
  pillow_heif.register_avif_opener()
69
 
 
 
70
  # HF 토큰 설정
71
  HF_TOKEN = os.getenv("HF_TOKEN")
72
  if HF_TOKEN is None:
 
109
# Move the pipeline onto GPU 0 when one is present; on any failure, warn
# and keep running on the current device instead of crashing.
try:
    cuda_ready = torch.cuda.is_available()
    if cuda_ready:
        pipe = pipe.to("cuda:0")  # target cuda:0 explicitly
except Exception as e:
    print(f"Warning: Could not move pipeline to CUDA: {str(e)}")
115
 
116
  class timer:
117
  def __init__(self, method_name="timed process"):