matthoffner commited on
Commit
3db094b
·
verified ·
1 Parent(s): 823ee94

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +3 -3
Dockerfile CHANGED
@@ -30,8 +30,8 @@ RUN wget https://huggingface.co/mys/ggml_bakllava-1/resolve/main/ggml-model-q4_k
30
  wget https://huggingface.co/mys/ggml_bakllava-1/resolve/main/mmproj-model-f16.gguf
31
 
32
  # Clone and build llava-server with CUDA support
33
- RUN git clone https://github.com/matthoffner/llava-cpp-server.git && \
34
- cd llava-cpp-server && \
35
  git submodule init && \
36
  git submodule update && \
37
  make LLAMA_CUBLAS=1
@@ -54,4 +54,4 @@ WORKDIR $HOME/app
54
  EXPOSE 8080
55
 
56
  # Start the llava-server with models
57
- CMD ["/app/llava-cpp-server/bin/llava-server", "-m", "ggml-model-q4_k.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0", "--threads", "10"]
 
30
  wget https://huggingface.co/mys/ggml_bakllava-1/resolve/main/mmproj-model-f16.gguf
31
 
32
  # Clone and build llava-server with CUDA support
33
+ RUN git clone https://github.com/ggerganov/llama.cpp.git && \
34
+ cd llama.cpp && \
35
  git submodule init && \
36
  git submodule update && \
37
  make LLAMA_CUBLAS=1
 
54
  EXPOSE 8080
55
 
56
  # Start the llava-server with models
57
+ CMD ["/app/llama.cpp/server", "-m", "ggml-model-q4_k.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0", "--threads", "10"]