import os

import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
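
# Stream a completion from the Inference API and return the full text once
# generation finishes (the UI only receives the final string).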
def generate_text(messages):
    generated = ""
    for token in client.chat_completion(messages, max_tokens=50, stream=True):
        content = token.choices[0].delta.content
        generated += content
        print(generated)
    return generated  # no-stream version: the whole reply is returned at once
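
# Gradio callback: append the user message to the history, query the LLM,
# and return ("", updated_history) so the textbox is cleared.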
def call_generate_text(message, history):
    # if len(message) == 0:
    #    messages.append({"role": "system", "content": "you response around 10 words"})
    print(message)
    print(history)

    user_message = [{"role": "user", "content": message}]
    messages = history + user_message

    try:
        text = generate_text(messages)
        assistant_message = [{"role": "assistant", "content": text}]
        messages += assistant_message
        return "", messages
    except RuntimeError as e:
        print(f"An unexpected error occurred: {e}")
        return "", history
head = '''
<script src="https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.webgpu.min.js" ></script>
<script type="module">
import { matcha_tts,env } from "https://akjava.github.io/Matcha-TTS-Japanese/js-esm/v001-20240921/matcha_tts_onnx_en.js";
window.MatchaTTSEn = matcha_tts
</script>
<script>
let last_chatbot_size = 0
let tts_text_index = 0
let tts_texts = []
const interval = 100
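
// Polling loop: every `interval` ms, speak the next queued sentence (if any)
// through the Matcha-TTS ONNX module loaded above.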
async function start_multi_line_tts() {
    //console.log("start_multi_line_tts")
    //console.log(tts_texts.length)
    if (tts_texts.length > tts_text_index){
        const tts_text = tts_texts[tts_text_index]
        tts_text_index += 1
        console.log(tts_text)
        if (tts_text!=""){
            await window.MatchaTTSEn(tts_text,"/file=models/ljspeech_sim.onnx")
        }
    }
    setTimeout(start_multi_line_tts, interval);
}
function reset_tts_text(){
    console.log("reset tts text")
    tts_text_index = 0
    tts_texts = []
}
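
// Replace characters the English TTS model cannot pronounce with spaces,
// keeping letters, digits and basic punctuation.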
function replaceSpecialChars(text) {
    // the hyphen is escaped so it matches a literal "-" instead of forming a range
    const pattern = /[^a-zA-Z0-9,.!?\-_']/g;
    return text.replace(pattern, ' ');
}
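
// Split the (possibly still-streaming) assistant message into sentences.
// The trailing fragment is queued only once it ends with . ! or ?, so an
// unfinished sentence is not spoken prematurely.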
function update_tts_texts(text){
    //console.log(text)
    const replaced_text = replaceSpecialChars(text)
    const new_texts = []
    const splited = replaced_text.split(/[.!?]+\s/);
    for (let i = 0; i < splited.length; i++) {
        const value = splited[i].trim();
        if (i === splited.length - 1) {
            if (value.endsWith(".") || value.endsWith("?") || value.endsWith("!")){
                new_texts.push(value);
            }
            console.log("Last element:", value);
        } else {
            // handle every element other than the last one
            new_texts.push(value);
        }
    }
    tts_texts=new_texts
}
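
// Called from the Gradio js hook whenever the chatbot value changes:
// reset the queue when a new message is added, then re-split the text of
// the most recent message for TTS.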
function update_chatbot(chatbot){
    //console.log(chatbot)
    if (chatbot.length!=last_chatbot_size){
        last_chatbot_size = chatbot.length
        reset_tts_text()
    }
    const text = (chatbot[chatbot.length -1])["content"]
    update_tts_texts(text)
}
window.replaceSpecialChars = replaceSpecialChars
window.update_chatbot = update_chatbot
window.update_tts_texts = update_tts_texts
window.reset_tts_text = reset_tts_text
start_multi_line_tts();
</script>
'''
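
# Build the Gradio UI: a messages-style chatbot plus a client-side hook that
# forwards each chatbot update to window.update_chatbot for TTS.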
with gr.Blocks(title="LLM with TTS", head=head) as demo:
    gr.Markdown("## LLM is unstable: the inference client used in this demo has inconsistent performance. It can respond within milliseconds, but it sometimes becomes unresponsive and times out.")
    gr.Markdown("## TTS takes a long time to load: please be patient; the first response may be delayed by more than 20 seconds while the model loads.")
    gr.Markdown("**Mistral-7B-Instruct-v0.3 / LJSpeech**. The LLM and TTS models may change without notice.")
js = """
function(chatbot){
window.update_chatbot(chatbot)
//text = (chatbot[chatbot.length -1])["content"]
//tts_text = window.replaceSpecialChars(text)
//console.log(tts_text)
//window.MatchaTTSEn(tts_text,"/file=models/ljspeech_sim.onnx")
}
"""
    chatbot = gr.Chatbot(type="messages")
    chatbot.change(None, [chatbot], [], js=js)
    msg = gr.Textbox()
    with gr.Row():
        clear = gr.ClearButton([msg, chatbot])
        submit = gr.Button("Submit", variant="primary")
        submit.click(call_generate_text, inputs=[msg, chatbot], outputs=[msg, chatbot])
    gr.HTML("""
    <br>
    <div id="footer">
    <b>Spaces</b><br>
    <a href="https://huggingface.co/spaces/Akjava/matcha-tts_vctk-onnx" style="font-size: 9px" target="link">Matcha-TTS VCTK-ONNX</a> |
    <a href="https://huggingface.co/spaces/Akjava/matcha-tts-onnx-benchmarks" style="font-size: 9px" target="link">Matcha-TTS ONNX-Benchmark</a> |
    <a href="https://huggingface.co/spaces/Akjava/AIChat-matcha-tts-onnx-en" style="font-size: 9px" target="link">AIChat-Matcha-TTS ONNX English</a> |
    <br><br>
    <b>Credits</b><br>
    <a href="https://github.com/akjava/Matcha-TTS-Japanese" style="font-size: 9px" target="link">Matcha-TTS-Japanese</a> |
    <a href="http://www.udialogue.org/download/cstr-vctk-corpus.html" style="font-size: 9px" target="link">CSTR VCTK Corpus</a> |
    <a href="https://github.com/cmusphinx/cmudict" style="font-size: 9px" target="link">CMUDict</a> |
    <a href="https://huggingface.co/docs/transformers.js/index" style="font-size: 9px" target="link">Transformers.js</a> |
    <a href="https://huggingface.co/cisco-ai/mini-bart-g2p" style="font-size: 9px" target="link">mini-bart-g2p</a> |
    <a href="https://onnxruntime.ai/docs/get-started/with-javascript/web.html" style="font-size: 9px" target="link">ONNX Runtime Web</a> |
    <a href="https://github.com/akjava/English-To-IPA-Collections" style="font-size: 9px" target="link">English-To-IPA-Collections</a> |
    <a href="https://huggingface.co/papers/2309.03199" style="font-size: 9px" target="link">Matcha-TTS Paper</a>
    </div>
    """)
    msg.submit(call_generate_text, [msg, chatbot], [msg, chatbot])

# Allow Gradio to serve the ONNX voice model referenced by the in-browser TTS.
# app_dir points at the Space's app root; the commented-out path is a local
# development checkout.
app_dir = "/home/user/app/"
# app_dir = "C:\\Users\\owner\\Documents\\pythons\\huggingface\\mistral-7b-v0.3-matcha-tts-en"

demo.launch(allowed_paths=[os.path.join(app_dir, "models", "ljspeech_sim.onnx")])