# Use the base image that ships llama-cpp-python and its OpenAI-compatible server
FROM ghcr.io/abetlen/llama-cpp-python:latest

# Environment variables for model details
ENV MODEL_NAME="llava-1.6-mistral-7b-gguf"
ENV DEFAULT_MODEL_FILE="llava-v1.6-mistral-7b.Q3_K_XS.gguf"
ENV MODEL_USER="cjpais"
ENV DEFAULT_MODEL_BRANCH="main"
ENV DEFAULT_CLIP_MODEL_FILE="mmproj-model-f16.gguf"
ENV MODEL_URL="https://huggingface.co/${MODEL_USER}/${MODEL_NAME}/resolve/${DEFAULT_MODEL_BRANCH}/${DEFAULT_MODEL_FILE}"
ENV CLIP_MODEL_URL="https://huggingface.co/${MODEL_USER}/${MODEL_NAME}/resolve/${DEFAULT_MODEL_BRANCH}/${DEFAULT_CLIP_MODEL_FILE}"

# Set up the working directory
WORKDIR /app

# Ensure curl is available for downloading the models
RUN apt-get update && apt-get install -y curl && \
    apt-get clean && rm -rf /var/lib/apt/lists/*

# Create a directory for the models
RUN mkdir -p /models

# Download the language model and the CLIP projector
RUN curl -L "${MODEL_URL}" -o "/models/${DEFAULT_MODEL_FILE}" && \
    curl -L "${CLIP_MODEL_URL}" -o "/models/${DEFAULT_CLIP_MODEL_FILE}"

# Declare the volume after the downloads; build steps that modify a path
# after it has been declared as a VOLUME have their changes discarded
VOLUME ["/models"]

ENV HOST=0.0.0.0
ENV PORT=8000

# Expose the port the server will run on
EXPOSE 8000

# Run the server. Model paths are hardcoded here because exec-form CMD
# does not expand environment variables.
CMD ["python3", "-m", "llama_cpp.server", "--model", "/models/llava-v1.6-mistral-7b.Q3_K_XS.gguf", "--clip_model_path", "/models/mmproj-model-f16.gguf", "--chat_format", "llava-1-5"]
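
# Example usage (a minimal sketch; the image tag "llava-server" and the host
# port mapping are illustrative assumptions, not part of this Dockerfile):
#
#   docker build -t llava-server .
#   docker run --rm -p 8000:8000 llava-server
#
# The server should then answer OpenAI-compatible requests, e.g.:
#
#   curl http://localhost:8000/v1/models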