# qwen-7b-chat / requirements.txt
# cudatoolkit: usually unnecessary here, since the PyTorch pip wheels bundle
# their own CUDA runtime; see https://www.zhihu.com/question/309583980
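# A quick sanity check (hypothetical command, not part of this file) that the
# installed torch wheel can actually see the GPU and its CUDA runtime:
#   python -c "import torch; print(torch.cuda.is_available(), torch.version.cuda)"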
# previously pinned to 4.30.2, then 4.31.0; now left unpinned
transformers
accelerate  # required for device_map="auto" and quantized loading
tiktoken    # Qwen's tokenizer is tiktoken-based
einops      # used by Qwen's remote model code
# flash-attention (optional; install from source):
#   git clone -b v1.0.8 https://github.com/Dao-AILab/flash-attention
#   cd flash-attention && pip install .
#   pip install csrc/layer_norm
#   pip install csrc/rotary
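# A minimal post-build check (hypothetical command) that the optional
# flash-attention kernels import cleanly:
#   python -c "import flash_attn"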
torch==2.0.1
safetensors
# bitsandbytes (alternatively, install from source:
#   pip install git+https://github.com/TimDettmers/bitsandbytes.git)
bitsandbytes==0.39.0
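# Hedged sketch of what bitsandbytes enables here: 8-bit quantized loading
# through transformers + accelerate (the model name is an assumption, not
# part of this file):
#   from transformers import AutoModelForCausalLM
#   model = AutoModelForCausalLM.from_pretrained(
#       "Qwen/Qwen-7B-Chat", trust_remote_code=True,
#       load_in_8bit=True, device_map="auto")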
transformers_stream_generator
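# Hedged sketch of transformers_stream_generator usage, as documented on its
# PyPI page (exact yield type may differ by version); it patches generate()
# so tokens stream one at a time:
#   from transformers_stream_generator import init_stream_support
#   init_stream_support()
#   for token in model.generate(input_ids, do_stream=True, do_sample=True):
#       print(tokenizer.decode(token), end="", flush=True)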
scipy
loguru
about-time
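# Putting the requirements together, a minimal chat sketch (the model name
# and its chat() helper come from the upstream Qwen model card, not this file):
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
#   model = AutoModelForCausalLM.from_pretrained(
#       "Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()
#   response, history = model.chat(tokenizer, "Hello!", history=None)
#   print(response)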