File size: 844 Bytes
818675e
 
 
751c1e6
39a25f7
da994cb
818675e
751c1e6
 
 
 
9dd267d
dc2c4dd
 
 
4a42148
dc2c4dd
 
 
 
4a42148
 
ccc3e1a
dc2c4dd
ccc3e1a
 
818675e
a4611e8
818675e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
from llama_cpp.server.app import create_app, Settings
from fastapi.responses import HTMLResponse
import os
import requests
from llama_cpp import Llama
import gradio as gr

# Download the quantized WizardLM-13B GGUF model once at startup.
url = "https://huggingface.co/TheBloke/WizardLM-13B-V1.2-GGUF/resolve/main/wizardlm-13b-v1.2.Q4_0.gguf"
# Stream the download in chunks: the file is multiple GB, and the original
# non-streaming get() buffered the whole body in memory before writing.
# raise_for_status() prevents silently saving an HTML error page as the model.
with requests.get(url, stream=True, timeout=60) as resp:
    resp.raise_for_status()
    with open("./model.gguf", mode="wb") as file:
        for chunk in resp.iter_content(chunk_size=1 << 20):  # 1 MiB chunks
            file.write(chunk)

# Build the OpenAI-compatible llama.cpp server application from explicit
# settings (the model path must match the file downloaded above).
server_settings = Settings(
    model="./model.gguf",
    n_threads=2,  # set to number of cpu cores
    embedding=False,
)
app = create_app(server_settings)

# Quick sanity check that the FastAPI app object was constructed.
print(app)

# Load the model again for direct in-process inference (separate from the
# server app built above).
llm = Llama(model_path="./model.gguf")


def response(input_text, history):
    """Gradio chat handler: complete a Q/A-style prompt with the local model.

    `history` is supplied by gr.ChatInterface but intentionally unused —
    each turn is answered statelessly.
    """
    completion = llm(
        f"Q: {input_text} A:",
        max_tokens=256,
        stop=["Q:", "\n"],
        echo=True,  # returned text includes the prompt itself
    )
    return completion["choices"][0]["text"]

gr.ChatInterface(response).queue().launch(share=True) #False, server_name="0.0.0.0", server_port=7864)