JiantaoLin committed
Commit c8a948e · 1 Parent(s): 4539b3a
new
app.py CHANGED
@@ -48,22 +48,6 @@ def install_cuda_toolkit():
     print("==> finfish install")
 install_cuda_toolkit()
 
-@spaces.GPU
-def check_gpu():
-    os.environ['CUDA_HOME'] = '/usr/local/cuda-12.1'
-    os.environ['PATH'] += ':/usr/local/cuda-12.1/bin'
-    # os.environ['LD_LIBRARY_PATH'] += ':/usr/local/cuda-12.1/lib64'
-    os.environ['LD_LIBRARY_PATH'] = "/usr/local/cuda-12.1/lib64:" + os.environ.get('LD_LIBRARY_PATH', '')
-    subprocess.run(['nvidia-smi'])  # Test whether CUDA is available
-    # Explicitly load libnvrtc.so.12
-    cuda_lib_path = "/usr/local/cuda-12.1/lib64/libnvrtc.so.12"
-    try:
-        ctypes.CDLL(cuda_lib_path, mode=ctypes.RTLD_GLOBAL)
-        print(f"Successfully preloaded {cuda_lib_path}")
-    except OSError as e:
-        print(f"Failed to preload {cuda_lib_path}: {e}")
-    print(f"torch.cuda.is_available:{torch.cuda.is_available()}")
-check_gpu()
 
 import base64
 import re
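The hunk above removes the check_gpu() call from right after install_cuda_toolkit(); the next hunk reintroduces it only after init_warpper(). For readers unfamiliar with the decorator, a minimal sketch of the @spaces.GPU pattern on Hugging Face ZeroGPU Spaces follows; the name probe_cuda is illustrative and not part of app.py, and the ordering rationale is an assumption rather than something stated in the commit message.

# Minimal sketch, assuming a ZeroGPU Space: the GPU is attached only for the
# duration of a @spaces.GPU-decorated call, so CUDA probes belong inside one.
# `probe_cuda` is an illustrative name, not taken from app.py.
import spaces
import torch

@spaces.GPU
def probe_cuda() -> bool:
    # Should report True inside the decorated call when a GPU has been granted.
    return torch.cuda.is_available()

if __name__ == "__main__":
    print(f"torch.cuda.is_available inside @spaces.GPU: {probe_cuda()}")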
@@ -114,6 +98,23 @@ def init_warpper():
 k3d_wrapper = init_wrapper_from_config('./pipeline/pipeline_config/default.yaml')
 init_warpper()
 
+@spaces.GPU
+def check_gpu():
+    os.environ['CUDA_HOME'] = '/usr/local/cuda-12.1'
+    os.environ['PATH'] += ':/usr/local/cuda-12.1/bin'
+    # os.environ['LD_LIBRARY_PATH'] += ':/usr/local/cuda-12.1/lib64'
+    os.environ['LD_LIBRARY_PATH'] = "/usr/local/cuda-12.1/lib64:" + os.environ.get('LD_LIBRARY_PATH', '')
+    subprocess.run(['nvidia-smi'])  # Test whether CUDA is available
+    # Explicitly load libnvrtc.so.12
+    cuda_lib_path = "/usr/local/cuda-12.1/lib64/libnvrtc.so.12"
+    try:
+        ctypes.CDLL(cuda_lib_path, mode=ctypes.RTLD_GLOBAL)
+        print(f"Successfully preloaded {cuda_lib_path}")
+    except OSError as e:
+        print(f"Failed to preload {cuda_lib_path}: {e}")
+    print(f"torch.cuda.is_available:{torch.cuda.is_available()}")
+check_gpu()
+
 TEMP_MESH_ADDRESS=''
 
 mesh_cache = None
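One technical point behind the relocated body, offered as a hedged aside: on Linux the dynamic loader reads LD_LIBRARY_PATH once at process start, so updating os.environ afterwards only helps child processes (such as the nvidia-smi subprocess), which is presumably why the script also preloads libnvrtc.so.12 through ctypes. The sketch below illustrates that distinction; it assumes a CUDA 12.1 install under /usr/local/cuda-12.1 and is not part of the commit.

# Illustrative sketch (not from app.py): environment changes vs. an explicit
# ctypes preload, assuming CUDA 12.1 under /usr/local/cuda-12.1.
import ctypes
import os
import shutil
import subprocess

cuda_lib_dir = "/usr/local/cuda-12.1/lib64"  # assumed install location

# Updating LD_LIBRARY_PATH here affects processes spawned afterwards
# (e.g. nvidia-smi), but not the loader of the current Python process.
os.environ["LD_LIBRARY_PATH"] = cuda_lib_dir + ":" + os.environ.get("LD_LIBRARY_PATH", "")
if shutil.which("nvidia-smi"):
    subprocess.run(["nvidia-smi"], check=False)

# To expose libnvrtc to code running inside this process (e.g. CUDA extensions
# resolved later), load it explicitly; RTLD_GLOBAL makes its symbols visible
# to libraries loaded afterwards.
try:
    ctypes.CDLL(os.path.join(cuda_lib_dir, "libnvrtc.so.12"), mode=ctypes.RTLD_GLOBAL)
    print("libnvrtc.so.12 preloaded")
except OSError as err:
    print(f"libnvrtc preload skipped: {err}")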