FROM nvidia/cuda:12.0.0-cudnn8-devel-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive

# Install dependencies (cuda-libraries-dev pinned to 12-0 to match the base image)
RUN apt update && \
    apt install --no-install-recommends -y build-essential python3 python3-pip wget curl git cmake zlib1g-dev libblas-dev && \
    apt install -y cuda-libraries-dev-12-0 && \
    apt clean && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Download ggml and mmproj models from HuggingFace
# (use /resolve/, not /raw/, so wget fetches the actual LFS weights rather than pointer files)
RUN wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/ggml-model-q4_k.gguf && \
    wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/mmproj-model-f16.gguf

# Clone and build llava-server with CUDA support
RUN git clone https://github.com/matthoffner/llava-cpp-server.git && \
    cd llava-cpp-server && \
    git submodule init && \
    git submodule update && \
    LLAMA_CUBLAS=1 make

# Create a non-root user for security reasons
RUN useradd -m -u 1000 user && \
    mkdir -p /home/user/app && \
    cp /app/ggml-model-q4_k.gguf /home/user/app && \
    cp /app/mmproj-model-f16.gguf /home/user/app

USER user
ENV HOME=/home/user
WORKDIR $HOME/app

# Expose the server port
EXPOSE 8080

# Start the llava-server with the models, listening on all interfaces
CMD ["/app/llava-cpp-server/bin/llava-server", "-m", "/home/user/app/ggml-model-q4_k.gguf", "--mmproj", "/home/user/app/mmproj-model-f16.gguf", "--host", "0.0.0.0"]
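
# A minimal sketch of building and running this image. The image tag
# "llava-server" and the host-side port mapping are illustrative assumptions,
# not part of the source; `--gpus all` requires the NVIDIA Container Toolkit
# on the host so the container can reach the GPU:
#
#   docker build -t llava-server .
#   docker run --gpus all -p 8080:8080 llava-server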