# qwen-7b-chat/requirements.txt
transformers==4.31.0
accelerate
tiktoken
einops
flash-attn  # PyPI package name for Dao-AILab's flash-attention
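# Optional: build flash-attention from source instead, which also provides
# the fused layer-norm and rotary kernels. The csrc installs below are run
# from inside the cloned checkout and assume CUDA's nvcc is available: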
# git clone -b v1.0.8 https://github.com/Dao-AILab/flash-attention
# cd flash-attention && pip install .
# pip install csrc/layer_norm
# pip install csrc/rotary
torch # 2.0.1
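# Install sketch (assumes a CUDA-capable environment so flash-attn can
# compile its kernels):
#   pip install -r requirements.txt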