zamal committed on
Commit
efdbe7a
·
verified ·
1 Parent(s): 1677dba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -5,7 +5,7 @@ import torch
5
  import subprocess
6
 
7
  # Run pip install command
8
- subprocess.run(["pip", "install", "bitsandbytes_*.whl"])
9
 
10
 
11
  # Define the repository for the quantized model
 
5
  import subprocess
6
 
7
  # Run pip install command
8
+ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
9
 
10
 
11
  # Define the repository for the quantized model