from llama_cpp.server.app import create_app, Settings
from fastapi.responses import HTMLResponse
import os
import requests
from llama_cpp import Llama
import gradio as gr
url="https://huggingface.co/TheBloke/WizardLM-13B-V1.2-GGUF/resolve/main/wizardlm-13b-v1.2.Q4_0.gguf" | |
response = requests.get(url) | |
with open("./model.gguf", mode="wb") as file: | |
file.write(response.content) | |
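# Alternative (a hedged sketch, not part of the original Space): the same file
# could be fetched with huggingface_hub, which adds caching and resumable
# downloads. Left commented out so the script above remains the single source
# of the model file.
# from huggingface_hub import hf_hub_download
# hf_hub_download(
#     repo_id="TheBloke/WizardLM-13B-V1.2-GGUF",
#     filename="wizardlm-13b-v1.2.Q4_0.gguf",
#     local_dir=".",
# )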
# Build an OpenAI-compatible FastAPI app around the downloaded model
# using llama-cpp-python's bundled server.
app = create_app(
    Settings(
        n_threads=2,  # set to the number of CPU cores
        model="./model.gguf",
        embedding=False,
    )
)
print(app)
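# A minimal sketch (assuming the standard uvicorn API) of how this FastAPI app
# could actually be served. It is commented out because uvicorn.run() blocks,
# which would prevent the Gradio interface below from ever launching.
# import uvicorn
# uvicorn.run(app, host="0.0.0.0", port=8000)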
# Note: this loads the model a second time, in-process, alongside the server
# app above, so the Space pays the memory cost twice.
llm = Llama(model_path="./model.gguf")

def generate(input_text, history):
    # Answer in a simple Q/A format; echo=False keeps the prompt out of the reply.
    output = llm(f"Q: {input_text} A:", max_tokens=256, stop=["Q:", "\n"], echo=False)
    return output['choices'][0]['text']

gr.ChatInterface(generate).queue().launch(share=True)  # or share=False, server_name="0.0.0.0", server_port=7864 for a fixed local port
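# Usage (a hedged sketch, assuming the standard gradio_client API and the
# default ChatInterface endpoint name "/chat"): once the app is running, it
# can be queried programmatically from another process.
# from gradio_client import Client
# client = Client("http://127.0.0.1:7860/")
# print(client.predict("What is llama.cpp?", api_name="/chat"))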