Christoph Holthaus committed
Commit 81c62e1 · 1 Parent(s): b8c846d
Files changed (2)
  1. app.py +8 -12
  2. requirements.txt +1 -1
app.py CHANGED
@@ -11,18 +11,14 @@ llama_model_name = "TheBloke/dolphin-2.2.1-AshhLimaRP-Mistral-7B-GGUF"
 print("! INITING DONE !")
 
 # Preparing things to work
-title = "llama.cpp API"
-desc = '''<h1>Hello, world!</h1>
-This is showcase how to make own server with Llama2 model.<br>
-I'm using here 7b model just for example. Also here's only CPU power.<br>
-But you can use GPU power as well!<br><br>
-<h1>How to GPU?</h1>
-Change <code>`CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS`</code> in Dockerfile on <code>`CMAKE_ARGS="-DLLAMA_CUBLAS=on"`</code>. Also you can try <code>`DLLAMA_CLBLAST`</code> or <code>`DLLAMA_METAL`</code>.<br><br>
-<h1>How to test it on own machine?</h1>
-You can install Docker, build image and run it. I made <code>`run-docker.sh`</code> for ya. To stop container run <code>`docker ps`</code>, find name of container and run <code>`docker stop _dockerContainerName_`</code><br>
-Or you can once follow steps in Dockerfile and try it on your machine, not in Docker.<br>
-<br>''' + f"Memory used: {psutil.virtual_memory()[2]}<br>" + '''
-Powered by <a href="https://github.com/abetlen/llama-cpp-python">llama-cpp-python</a> and <a href="https://www.gradio.app/">Gradio</a>.<br><br>'''
+title = "Demo for 7B Models - Quantized"
+desc = '''<h1>Demo for 7B Models - Quantized</h1>
+Quantized to run in the free tier hosting.
+Have a quick way to test models or share them with others without hassle.
+It runs slow, as it's on cpu. Usable for basic tests.
+It uses quantized models in gguf-Format and llama.cpp to run them.
+<br>''' + f"DEBUG: Memory used: {psutil.virtual_memory()[2]}<br>" + '''
+Powered by ...'''
 
 # Loading prompt
 prompt = ""
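For context on what these strings feed into: the app serves a quantized GGUF model on CPU through llama-cpp-python and Gradio. Below is a minimal, hypothetical sketch of how `title` and `desc` might be wired into the interface — the `model_path`, the `generate` helper, and the interface layout are assumptions, since the rest of app.py is not visible in this diff.

```python
# Hypothetical sketch — the surrounding app.py is only partially shown in this commit.
import gradio as gr
from llama_cpp import Llama  # installed via the llama-cpp-python package

title = "Demo for 7B Models - Quantized"
desc = "<h1>Demo for 7B Models - Quantized</h1>..."  # the HTML built in the diff above

# Load a quantized GGUF file on CPU; the file name here is an assumption —
# the actual download/selection logic is not part of this diff.
llm = Llama(model_path="dolphin-2.2.1-ashhlimarp-mistral-7b.Q4_K_M.gguf", n_ctx=2048)

def generate(prompt: str) -> str:
    # Plain completion call; llama-cpp-python returns an OpenAI-style dict.
    out = llm(prompt, max_tokens=256)
    return out["choices"][0]["text"]

gr.Interface(fn=generate, inputs="text", outputs="text",
             title=title, description=desc).launch()
```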
requirements.txt CHANGED
@@ -1,3 +1,3 @@
 psutil
 gradio
-llama_cpp
+llama-cpp-python
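This fix matters because `llama_cpp` is not the package's name on PyPI: the Python bindings are distributed as `llama-cpp-python`, while the module you import is still called `llama_cpp`. A quick sanity check, with the GPU build flag from the old app description kept as a comment:

```python
# The PyPI distribution is "llama-cpp-python"; the import name stays "llama_cpp":
#   pip install llama-cpp-python
# The removed app description mentioned GPU builds via CMAKE_ARGS, e.g.:
#   CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
import llama_cpp

print(llama_cpp.__version__)  # confirms the bindings resolve after installation
```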