datasets<=3.2.0,>=2.16.0
accelerate<=1.2.1,>=0.34.0
peft<=0.12.0,>=0.11.1
trl<=0.9.6,>=0.8.6
tokenizers<=0.21.0,>=0.19.0
gradio<=5.18.0,>=4.38.0
pandas>=2.0.0
scipy
einops
sentencepiece
tiktoken
protobuf
uvicorn
pydantic
fastapi
sse-starlette
matplotlib>=3.7.0
fire
packaging
pyyaml
numpy<2.0.0
av
librosa
tyro<0.9.0
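# Note (added; not part of the generated spec): the bracketed sections below
# follow setuptools requires.txt conventions. A "[:MARKER]" header applies its
# requirements only when the PEP 508 environment marker holds, while a
# "[NAME]" header defines the optional extra NAME.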
[:python_version < "3.10"]
transformers!=4.46.*,!=4.47.*,!=4.48.*,<=4.49.0,>=4.41.2
[:python_version >= "3.10"]
transformers!=4.46.*,!=4.47.*,!=4.48.0,<=4.49.0,>=4.41.2
[adam-mini]
adam-mini
[apollo]
apollo-torch
[aqlm]
aqlm[gpu]>=1.1.0
[awq]
autoawq
[badam]
badam>=1.2.1
[bitsandbytes]
bitsandbytes>=0.39.0
[deepspeed]
deepspeed<=0.16.2,>=0.10.0
[dev]
pre-commit
ruff
pytest
[eetq]
eetq
[galore]
galore-torch
[gptq]
optimum>=1.17.0
auto-gptq>=0.5.0
[hqq]
hqq
[liger-kernel]
liger-kernel
[metrics]
nltk
jieba
rouge-chinese
[minicpm_v]
soundfile
torchvision
torchaudio
vector_quantize_pytorch
vocos
msgpack
referencing
jsonschema_specifications
[modelscope]
modelscope
[openmind]
openmind
[qwen]
transformers_stream_generator
[swanlab]
swanlab
[torch]
torch>=1.13.1
[torch-npu]
torch==2.4.0
torch-npu==2.4.0.post2
decorator
[vllm]
vllm<=0.7.2,>=0.4.3
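# Usage sketch (added; assumes a source checkout of this project): extras are
# installed with pip's extras syntax, e.g. `pip install -e ".[metrics]"` pulls
# in nltk, jieba and rouge-chinese on top of the base requirements, and
# `pip install -e ".[torch-npu]"` pins torch==2.4.0 alongside torch-npu.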