Spaces:
Running
on
Zero
Running
on
Zero
File size: 1,452 Bytes
d83ac8a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 |
def install_packages():
    """Install GPU runtime dependencies (torch, flash-attn, xformers) via pip.

    Intended to run at startup in an ephemeral environment (e.g. an HF Space)
    where heavy packages must be installed on first boot. Each install step is
    skipped when the corresponding package is already importable.

    Raises:
        subprocess.CalledProcessError: if any pip invocation fails
            (every call uses check=True).
    """
    import importlib
    import os
    import subprocess
    import sys

    def _is_package_available(name: str) -> bool:
        """Return True if *name* is importable in the current interpreter."""
        try:
            importlib.import_module(name)
            return True
        except (ImportError, ModuleNotFoundError):
            return False

    def _pip(*args, env=None):
        """Run `python -m pip <args>`.

        List form (no shell=True) is robust to spaces in sys.executable and
        avoids shell-quoting pitfalls.
        """
        subprocess.run([sys.executable, "-m", "pip", *args], env=env, check=True)

    # Upgrade pip and build tooling first so source builds (flash-attn) work.
    _pip("install", "--upgrade", "pip")
    _pip("install", "--upgrade", "ninja", "wheel", "setuptools", "packaging")

    # Torch pinned against the CUDA 12.4 wheel index plus matching cuDNN/cuBLAS.
    # Gated on ninja's absence as a cheap first-run sentinel (preserves the
    # original behavior).
    if not _is_package_available("ninja"):
        _pip(
            "install",
            "ninja",
            "nvidia-cudnn-cu12==9.1.0.70",
            "nvidia-cublas-cu12==12.4.5.8",
            "torch==2.5.1",
            "--extra-index-url",
            "https://download.pytorch.org/whl/cu124",
        )

    # flash-attn compiles from source; MAX_JOBS=1 caps build parallelism to
    # avoid OOM. The environment is MERGED with os.environ — a bare
    # {"MAX_JOBS": "1"} would drop PATH/CUDA vars and break the build.
    # FIX: the PyPI distribution is "flash-attn" (import name "flash_attn"),
    # not "flash-attention", so the guard above can actually be satisfied.
    if not _is_package_available("flash_attn"):
        _pip(
            "install",
            "-v",
            "-U",
            "flash-attn",
            "--no-build-isolation",
            env={**os.environ, "MAX_JOBS": "1"},
        )

    # xformers, pinned against the same torch/CUDA 12.4 stack as above.
    if not _is_package_available("xformers"):
        _pip(
            "install",
            "-v",
            "-U",
            "xformers",
            "nvidia-cudnn-cu12==9.1.0.70",
            "nvidia-cublas-cu12==12.4.5.8",
            "torch==2.5.1",
            "--extra-index-url",
            "https://download.pytorch.org/whl/cu124",
        )
|