Commit f34278a · Parent(s): 00bd4d6
Update Dockerfile
Dockerfile CHANGED (+3 -3)
@@ -26,7 +26,7 @@ ENV PATH="/usr/local/cuda/bin:$PATH" \
 WORKDIR /app

 # Download ggml and mmproj models from HuggingFace
-RUN wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/ggml-model-
+RUN wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/ggml-model-q5_k.gguf && \
     wget https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/mmproj-model-f16.gguf

 # Clone and build llava-server with CUDA support
@@ -42,7 +42,7 @@ RUN useradd -m -u 1000 user && \
     cp /app/ggml-model-q4_k.gguf /home/user/app && \
     cp /app/mmproj-model-f16.gguf /home/user/app

-RUN chown user:user /home/user/app/ggml-model-
+RUN chown user:user /home/user/app/ggml-model-q5_k.gguf && \
     chown user:user /home/user/app/mmproj-model-f16.gguf

 USER user
@@ -54,4 +54,4 @@ WORKDIR $HOME/app
 EXPOSE 8080

 # Start the llava-server with models
-CMD ["/app/llava-cpp-server/bin/llava-server", "-m", "ggml-model-
+CMD ["/app/llava-cpp-server/bin/llava-server", "-m", "ggml-model-q5_k.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0"]
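For local testing, a minimal sketch of building and running the updated image. The image tag is arbitrary (not part of the commit), and the --gpus flag assumes the NVIDIA container toolkit is installed on the host, since the Dockerfile builds llava-server with CUDA support and serves on the port declared by EXPOSE 8080.

    # Build the image from this Dockerfile (tag name chosen here for illustration)
    docker build -t llava-cuda .

    # Run with GPU access and publish the server port declared in the Dockerfile
    docker run --gpus all -p 8080:8080 llava-cuda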