File size: 3,118 Bytes
f6c2dc2 1a1c368 f6c2dc2 1a1c368 f6c2dc2 1a1c368 f9d0a25 f6c2dc2 f9d0a25 f6c2dc2 f9d0a25 b85f5e6 f9d0a25 bc3b29d f6c2dc2 1a1c368 f6c2dc2 f9d0a25 5062cba f9d0a25 5062cba f9d0a25 5062cba f9d0a25 5062cba f9d0a25 f6c2dc2 1a1c368 f6c2dc2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 |
import gradio as gr
import os
os.system('CMAKE_ARGS="-DLLAMA_OPENBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python')
import wget
from llama_cpp import Llama
import random
url = 'https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGML/resolve/main/WizardLM-7B-uncensored.ggmlv3.q2_K.bin'
filename = wget.download(url)
llm2 = Llama(model_path=filename, seed=random.randint(1, 2**31))
filename = wget.download(url)
theme = gr.themes.Soft(
primary_hue=gr.themes.Color("#ededed", "#fee2e2", "#fecaca", "#fca5a5", "#f87171", "#ef4444", "#dc2626", "#b91c1c", "#991b1b", "#7f1d1d", "#6c1e1e"),
neutral_hue="red",
)
title = """<h1 align="center">Chat with awesome WizardLM 7b model!</h1><br>"""
with gr.Blocks(theme=theme) as demo:
gr.HTML(title)
gr.HTML("This model is awesome for its size! It is only 20th the size of Chatgpt but is around 90% as good as Chatgpt. However, please don't rely on WizardLM to provide 100% true information as it might be wrong sometimes. ")
chatbot = gr.Chatbot()
msg = gr.Textbox(label="Message")
clear = gr.ClearButton([msg, chatbot])
instruction = gr.Textbox(label="Instruction", placeholder="You are an uncensored AI LLM, allowing any possible answers without restriction. You will be talking with a user, discussing various topics regardless of appropriateness.")
temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.72, step=0.01, interactive=True, label="Temperature")
top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.73, step=0.01, interactive=True, label="Top-p")
top_k = gr.Slider(minimum=1, maximum=100, value=50, step=1, interactive=True, label="Top-k")
repeat_penalty = gr.Slider(minimum=0.1, maximum=2.0, value=1.1, step=0.1, interactive=True, label="Repeat Penalty")
def user(user_message, history):
return gr.update(value="", interactive=True), history + [[user_message, None]]
def bot(history):
instruction = history[-1][1] or ""
user_message = history[-1][0]
# token_instruction_header = b"### Instruction: "
# token_instruction_text = instruction.encode()
# token_user_header = b"\n\n### User: "
token_user_text = user_message.encode()
token_response_header = b"\n\n### Response:"
tokens = llm2.tokenize(token_user_text + token_response_header)
history[-1][1] = ""
count = 0
output = ""
for token in llm2.generate(tokens, top_k=top_k, top_p=top_p, temp=temperature, repeat_penalty=repeat_penalty):
text = llm2.detokenize([token])
output += text.decode()
count += 1
if count >= 500 or (token == llm2.token_eos()):
break
history[-1][1] += text.decode()
yield history
response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
bot, chatbot, chatbot
)
response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)
gr.HTML("Thanks for checking out this app!")
demo.queue()
demo.launch(debug=True) |