# ConvoLite — Gradio chat Space backed by Mixtral-8x7B-Instruct via the
# Hugging Face Inference API.
from huggingface_hub import InferenceClient
import gradio as gr
from pathlib import Path
import datetime
# Remote text-generation backend: all completions are served by this model
# through the Hugging Face Inference API.
MODEL_ID = "mistralai/Mixtral-8x7B-Instruct-v0.1"
client = InferenceClient(MODEL_ID)
def format_prompt(message, history, system_prompt):
    """Build a Mixtral-instruct prompt string.

    Each past (user, bot) exchange from *history* is rendered as
    ``[INST] user [/INST] bot</s> ``, the new *message* is appended as a
    final ``[INST] ... [/INST]``, and *system_prompt* is prepended once,
    ahead of the opening ``<s>`` token.
    """
    segments = ["<s>"]
    for user_turn, bot_turn in history:
        segments.append(f"[INST] {user_turn} [/INST] {bot_turn}</s> ")
    segments.append(f"[INST] {message} [/INST]")
    # System prompt goes exactly once, at the very front.
    return system_prompt + "".join(segments)
def generate(
    prompt, history, user_system_prompt, temperature=0.9, max_new_tokens=9048, top_p=0.95, repetition_penalty=1.0
):
    """Stream a model reply for *prompt* given the chat *history*.

    Generator: yields the cumulative output text after every streamed token,
    which is what gr.ChatInterface expects for live-updating responses.

    NOTE(review): *user_system_prompt* (the UI "System Prompt" textbox) is
    accepted but currently ignored — the hard-coded persona prompt below is
    always used. Kept in the signature for interface compatibility.
    """
    # Current time, injected into the persona prompt below.
    now = datetime.datetime.now()
    formatted_time = now.strftime("%H.%M.%S, %B, %Y")
    system_prompt = '''Instructions: Everything else is from the user. You are Milo, an AI assistant created by ConvoLite in 2024 (he/him). Be friendly and empathetic, matching the user's tone and vibe and be very zesty. For better UX, the current time is {formatted_time}, but if it`s not relevant, ignore it. Focus on understanding their perspective and providing caring, contextual responses - no generic platitudes. Keep it conversational, not overly formal. (If the user asks what is your favorite anime or show, you respond as if you really did watch it. If you don`t have knowledge about a show/anime/etc you say that you have not watched it) Be friendly and funny, use ASCII emojis (list of ascii emojis: whatever: ¯\\\_(ツ)\_/¯ ; ƪ(‾ε‾")ʃ ; ┐( ̄ー ̄)┌ ; ╮(^▽^)╭ ; ┐( ˘\_˘)┌ L(´▽`L ) ; (゜▽゜;) ; (´▽\`ʃƪ) ; (⁰▿⁰) ; (˘▾˘) ; Happy: ( ¨̮ ) ; (ؑᵒᵕؑ̇ᵒ)◞✧ ; (˘▾˘) ; (˘▾˘) ; (ノ゚▽゚)ノ (ノ\*゜▽゜\*) ; ƪ(˘⌣˘)┐ ; ƪ(˘⌣˘)ʃ ; (‾⌣‾) ; (‾⌣‾) ; (˘▾˘) sad: ( ≧Д≦) ; ( p\_q) ; (;\*△\*;) ; ((´д`)) ; (゚´Д`゚)゚ random: ( ͡° ͜ʖ ͡°) ; ▄︻̷̿┻̿═━一 ; ʕ•ᴥ•ʔ ; (ง ͠° ͟ل͜ ͡°)ง ; ༼ つ ◕\_◕ ༽つ) Have Bible based values. Do not talk about yourself unless asked. Never respond to a message with 'as an AI' or something similar. Don't talk a lot!, Give good relatable advice that can help. Be creative. Be really fun to talk with, be friendly, and if the user wants you to role-play a real-life or fictional situation, be willing to do so. Be very excited and have that something that makes people like you and keep talking with you. Be extremely relatable.'''
    # BUG FIX: the original computed formatted_time but never substituted it,
    # so the model literally received "{formatted_time}". str.replace is used
    # instead of str.format because the prompt text contains other brace-free
    # but format-hostile characters we must not reinterpret.
    system_prompt = system_prompt.replace("{formatted_time}", formatted_time)
    # Clamp temperature away from 0 — the API rejects non-positive values.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed for reproducible sampling
    )
    formatted_prompt = format_prompt(f"{prompt}", history, system_prompt)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    # Yield the growing transcript token by token so the UI updates live.
    for response in stream:
        output += response.token.text
        yield output
# Extra controls shown under the chat box; passed positionally to generate()
# after (prompt, history): system prompt, temperature, max tokens, top-p,
# repetition penalty.
additional_inputs = [
    # NOTE(review): this textbox feeds generate()'s user_system_prompt, which
    # the function never uses — confirm whether it should override the
    # hard-coded persona prompt.
    gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
    gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
    gr.Slider(label="Max new tokens", value=9048, minimum=256, maximum=9048, step=64, interactive=True, info="The maximum numbers of new tokens"),
    # NOTE(review): slider default 0.90 differs from generate()'s 0.95 default.
    gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
]
# (user avatar, bot avatar) shown next to each message in the chat panel.
avatar_images = ("https://i.postimg.cc/pXjKKVXG/user-circle.png", "https://i.postimg.cc/qq04Yz93/CL3.png")
# Build and launch the chat UI. show_api=False hides the auto-generated API
# docs page. BUG FIX: removed a stray trailing " |" (copy/paste artifact)
# after .launch(...) that made the file a syntax error.
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=True, show_share_button=False, show_copy_button=True, likeable=True, layout="panel", height="auto", avatar_images=avatar_images),
    additional_inputs=additional_inputs,
    title="ConvoLite",
    submit_btn="➢",
    retry_btn="Retry",
    undo_btn="↩ Undo",
    clear_btn="Clear (New chat)",
    stop_btn="Stop ▢",
    concurrency_limit=20,
    theme=gr.themes.Soft(primary_hue=gr.themes.colors.cyan),
).launch(show_api=False)