# Base image
FROM ghcr.io/ggerganov/llama.cpp:full-cuda
# Keep apt from prompting for input during the build
ENV DEBIAN_FRONTEND=noninteractive
# Update and install necessary dependencies
RUN apt update && \
    apt install --no-install-recommends -y \
    build-essential \
    python3 \
    python3-pip \
    wget \
    curl \
    git \
    cmake \
    zlib1g-dev \
    libblas-dev && \
    apt clean && \
    rm -rf /var/lib/apt/lists/*
# Set up CUDA environment variables (possibly redundant, since the CUDA-enabled base image should already provide these, but it's good to be explicit)
ENV PATH="/usr/local/cuda/bin:$PATH" \
    LD_LIBRARY_PATH="/usr/local/cuda/lib64:$LD_LIBRARY_PATH" \
    CUDA_HOME="/usr/local/cuda"
WORKDIR /app
# Download the LLaVA GGUF model and multimodal projector (mmproj) from HuggingFace
RUN wget https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/llava-v1.6-mistral-7b.Q5_K_M.gguf && \
    wget https://huggingface.co/cjpais/llava-1.6-mistral-7b-gguf/resolve/main/mmproj-model-f16.gguf
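# Both files land in /app (the WORKDIR set above), which is where the relative
# model paths in the CMD below are resolved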
# Rebuild llama.cpp with CUDA (cuBLAS) support; the source tree already ships in the base image, so nothing needs to be cloned
# Optional: list the source tree for build debugging
RUN ls -al
RUN make LLAMA_CUBLAS=1
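# NOTE: LLAMA_CUBLAS=1 is the CUDA switch for the Makefile in this era of
# llama.cpp; later revisions renamed it (e.g. GGML_CUDA=1), so adjust the flag
# if the build reports it as unknown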
# Expose the port
EXPOSE 8080
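# EXPOSE is documentation only; publish the port at run time with -p 8080:8080
# (see the example at the end of this file)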
# Start the server through the base image's entrypoint, offloading 33 layers to the GPU (-ngl 33)
CMD ["--server", "--model", "llava-v1.6-mistral-7b.Q5_K_M.gguf", "--mmproj", "mmproj-model-f16.gguf", "--threads", "6", "--host", "0.0.0.0", "-ngl", "33"]
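
# Example usage, assuming an NVIDIA GPU host with the NVIDIA Container Toolkit
# installed (the image tag "llava-server" is just an illustration):
#   docker build -t llava-server .
#   docker run --gpus all -p 8080:8080 llava-server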