import gradio as gr
from huggingface_hub import InferenceClient
# Serverless Inference API client pinned to a specific instruct model;
# shared by all requests in this demo (the endpoint can be slow/flaky — see UI notes below).
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
def generate_text(messages):
    """Run a streamed chat completion and return the full assistant reply.

    Streams tokens from the shared ``client`` but accumulates them and
    returns only the final text (callers get a non-streaming result).

    Args:
        messages: OpenAI-style chat history, a list of
            ``{"role": ..., "content": ...}`` dicts.

    Returns:
        The complete generated assistant message as a string.
    """
    generated = ""
    for token in client.chat_completion(messages, max_tokens=50, stream=True):
        # Streamed deltas may carry ``content=None`` (e.g. role-only or final
        # chunks); guard with ``or ""`` so concatenation never raises TypeError.
        generated += token.choices[0].delta.content or ""
    # Log the full reply once instead of on every token.
    print(generated)
    return generated  # non-streaming result assembled from the stream
def call_generate_text(message, history):
    """Gradio callback: append the user turn, query the LLM, return new history.

    Args:
        message: Text the user typed into the Textbox.
        history: Chatbot history in ``type="messages"`` format
            (list of ``{"role": ..., "content": ...}`` dicts).

    Returns:
        Tuple of (cleared textbox value ``""``, updated message history).
        On any generation failure the original history is returned unchanged,
        so the UI keeps working when the inference endpoint times out.
    """
    print(message)
    print(history)
    user_message = [{"role": "user", "content": message}]
    messages = history + user_message
    try:
        text = generate_text(messages)
        messages.append({"role": "assistant", "content": text})
        return "", messages
    except Exception as e:
        # Boundary handler: the HF client can raise HTTP/timeout errors that
        # are not RuntimeError, so catch broadly here, log, and degrade
        # gracefully by leaving the chat history untouched.
        print(f"An unexpected error occurred: {e}")
        return "", history
# Extra HTML injected into the page <head> via gr.Blocks(head=...);
# currently empty — presumably a placeholder for TTS script tags. TODO confirm.
head = '''
'''
with gr.Blocks(title="LLM with TTS",head=head) as demo:
gr.Markdown("## LLM is unstable:The inference client used in this demo exhibits inconsistent performance. While it can provide responses in milliseconds, it sometimes becomes unresponsive and times out.")
gr.Markdown("## TTS talke a long loading time:Please be patient, the first response may have a delay of up to over 20 seconds while loading.")
gr.Markdown("**Mistral-7B-Instruct-v0.3/LJSpeech**.LLM and TTS models will change without notice.")
js = """
function(chatbot){
text = (chatbot[chatbot.length -1])["content"]
tts_text = window.replaceSpecialChars(text)
console.log(tts_text)
window.MatchaTTSEn(tts_text,"/file=models/ljspeech_sim.onnx")
}
"""
chatbot = gr.Chatbot(type="messages")
chatbot.change(None,[chatbot],[],js=js)
msg = gr.Textbox()
with gr.Row():
clear = gr.ClearButton([msg, chatbot])
submit = gr.Button("Submit",variant="primary").click(call_generate_text, inputs=[msg, chatbot], outputs=[msg,chatbot])
gr.HTML("""