Update Dockerfile
Dockerfile CHANGED (+3 -3)
@@ -30,8 +30,8 @@ RUN wget https://huggingface.co/mys/ggml_bakllava-1/resolve/main/ggml-model-q4_k
     wget https://huggingface.co/mys/ggml_bakllava-1/resolve/main/mmproj-model-f16.gguf
 
 # Clone and build llava-server with CUDA support
-RUN git clone https://github.com/
-    cd
+RUN git clone https://github.com/ggerganov/llama.cpp.git && \
+    cd llama.cpp && \
     git submodule init && \
     git submodule update && \
     make LLAMA_CUBLAS=1
@@ -54,4 +54,4 @@ WORKDIR $HOME/app
 EXPOSE 8080
 
 # Start the llava-server with models
-CMD ["/app/llama-cpp/server", "-m", "ggml-model-q4_k.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0", "--threads", "10"]
+CMD ["/app/llama-cpp/server", "-m", "ggml-model-q4_k.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0", "--threads", "10"]
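This commit completes the two steps that were previously cut off: the clone now points at ggerganov/llama.cpp and is built with cuBLAS GPU offload (make LLAMA_CUBLAS=1), and the CMD launches the resulting server binary with the BakLLaVA weights (ggml-model-q4_k.gguf) plus the multimodal projector (mmproj-model-f16.gguf), listening on 0.0.0.0:8080 with 10 threads.

Below is a minimal smoke-test sketch against the running container, assuming port 8080 is published and reachable at localhost, and that this llama.cpp server build accepts base64-encoded images via the image_data field of its /completion endpoint. photo.jpg, the prompt text, and the image id are placeholders, not part of the commit.

import base64

import requests  # third-party HTTP client, assumed installed

# Read and base64-encode a local image; "photo.jpg" is a placeholder path.
with open("photo.jpg", "rb") as f:
    img_b64 = base64.b64encode(f.read()).decode()

# The llama.cpp example server takes images in `image_data`, each tagged
# with an `id` that the prompt references as [img-<id>].
resp = requests.post(
    "http://localhost:8080/completion",
    json={
        "prompt": "USER: [img-10] Describe this image.\nASSISTANT:",
        "image_data": [{"data": img_b64, "id": 10}],
        "n_predict": 128,
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["content"])

If the request succeeds, the returned JSON carries the generated description in its content field; a connection error usually means the port is not published, or that the binary path in the CMD does not match the directory the clone actually creates.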