# qwen2.5-VL-api / Dockerfile

# Use Python 3.10 as the base image
FROM python:3.10-slim

# Install system dependencies
RUN apt-get update && apt-get install -y \
    ffmpeg \
    git \
    build-essential \
    ninja-build \
    && rm -rf /var/lib/apt/lists/*

# Create a non-root user
RUN useradd -m -u 1000 user

WORKDIR /app

# Install Python dependencies in the correct order
RUN pip install --no-cache-dir --upgrade pip && \
    # Install numpy<2 first for compatibility
    pip install --no-cache-dir 'numpy<2' && \
    # Install PyTorch and the other core dependencies
    pip install --no-cache-dir \
        torch \
        torchvision \
        ninja \
        transformers \
        accelerate \
        'qwen-vl-utils[decord]==0.0.8' \
        fastapi \
        'uvicorn[standard]' \
        python-multipart \
        pillow \
        pydantic \
        supervision && \
    # Install AutoAWQ last
    pip install --no-cache-dir 'autoawq[cpu]'

# Install transformers from source (replaces the release version installed above,
# for Qwen2.5-VL support) along with accelerate
RUN pip install --no-cache-dir git+https://github.com/huggingface/transformers accelerate

# Intel Extension for PyTorch (optimizations for Intel CPUs)
RUN pip install --no-cache-dir intel-extension-for-pytorch

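# Optional sanity check (not part of the original file): uncomment to fail the build
# early if the core imports are broken after the installs above.
# RUN python -c "import torch, transformers, fastapi; print(torch.__version__, transformers.__version__)"
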
# Copy application files
COPY --chown=user:user . /app

# Create required directories and set permissions
RUN mkdir -p /home/user/.cache/huggingface && \
    mkdir -p /temp && \
    chown -R user:user /home/user/.cache && \
    chown -R user:user /temp

# Switch to non-root user
USER user

# Set environment variables
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH \
    TRANSFORMERS_CACHE=/home/user/.cache/huggingface \
    TORCH_HOME=/home/user/.cache/torch \
    HF_HOME=/home/user/.cache/huggingface

# Expose the port
EXPOSE 7860

# Command to run the application
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
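
# Example usage (illustrative, not part of the original file). The image and volume
# names are placeholders; the port matches the EXPOSE directive above, and the mount
# path matches HF_HOME so downloaded model weights persist across runs.
#   docker build -t qwen2.5-vl-api .
#   docker run -p 7860:7860 -v hf_cache:/home/user/.cache/huggingface qwen2.5-vl-api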