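# Gradio chat demo: gemma-2-27b-it served through the Hugging Face Inference API,
# with replies spoken in the browser by Matcha-TTS via ONNX Runtime Web.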
import gradio as gr
from huggingface_hub import InferenceClient
client = InferenceClient("google/gemma-2-27b-it")
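
# chat_completion expects OpenAI-style message dicts, e.g.:
#   [{"role": "system", "content": "..."}, {"role": "user", "content": "Hello"}]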
def generate_text(messages):
    """Stream a completion from the hosted model and return the accumulated text."""
    generated = ""
    for token in client.chat_completion(messages, max_tokens=50, stream=True):
        content = token.choices[0].delta.content
        if content:  # the final streamed chunk may carry no content
            generated += content
            print(generated)
    return generated  # non-streaming return: the full text is handed back at once
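
# Gradio submit handler: receives the textbox value and the chatbot history
# (messages format) and returns ("", updated_history) so the textbox is cleared.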
def call_generate_text(message, history):
    # Seed a fresh conversation with a short system prompt.
    if len(history) == 0:
        history.append({"role": "system", "content": "You respond in around 10 words."})

    print(message)
    print(history)

    messages = history + [{"role": "user", "content": message}]
    try:
        text = generate_text(messages)
        messages += [{"role": "assistant", "content": text}]
        return "", messages
    except RuntimeError as e:
        print(f"An unexpected error occurred: {e}")
        return "", history
head = '''
<script src="https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.webgpu.min.js"></script>
<script type="module">
    import { matcha_tts, env } from "https://akjava.github.io/Matcha-TTS-Japanese/js-esm/v001-20240921/matcha_tts_onnx_en.js";
    window.MatchaTTSEn = matcha_tts;

    // Replace anything the English TTS front end cannot handle with a space.
    function replaceSpecialChars(text) {
        const pattern = /[^a-zA-Z0-9,.!?_'-]/g;
        return text.replace(pattern, ' ');
    }
    window.replaceSpecialChars = replaceSpecialChars;
</script>
'''
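
# Build the Gradio UI: a messages-format chatbot, a textbox for the user message,
# and a clear button; `head` is injected so the browser-side TTS is available.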
with gr.Blocks(title="LLM with TTS", head=head) as demo:
    gr.Markdown("## Please be patient: the first response may take up to 20 seconds while the models load.")
    gr.Markdown("**gemma-2-27b-it / LJSpeech**. The LLM and TTS models may change without notice.")

    # Client-side hook: whenever the chatbot updates, take the newest message,
    # strip characters the TTS cannot handle, and speak it with Matcha-TTS.
    js = """
    function(chatbot){
        const text = chatbot[chatbot.length - 1]["content"];
        const tts_text = window.replaceSpecialChars(text);
        console.log(tts_text);
        window.MatchaTTSEn(tts_text, "./models/test.txt");
    }
    """
    chatbot = gr.Chatbot(type="messages")
    chatbot.change(None, [chatbot], [], js=js)

    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])
gr.HTML("""
<br>
<div id="footer">
<b>Spaces</b><br>
<a href="https://huggingface.co/spaces/Akjava/matcha-tts_vctk-onnx" style="font-size: 9px" target="link">Match-TTS VCTK-ONNX</a> |
<a href="https://huggingface.co/spaces/Akjava/matcha-tts-onnx-benchmarks" style="font-size: 9px" target="link">Match-TTS ONNX-Benchmark</a> |
<br><br>
<b>Credits</b><br>
<a href="https://github.com/akjava/Matcha-TTS-Japanese" style="font-size: 9px" target="link">Matcha-TTS-Japanese</a> |
<a href = "http://www.udialogue.org/download/cstr-vctk-corpus.html" style="font-size: 9px" target="link">CSTR VCTK Corpus</a> |
<a href = "https://github.com/cmusphinx/cmudict" style="font-size: 9px" target="link">CMUDict</a> |
<a href = "https://huggingface.co/docs/transformers.js/index" style="font-size: 9px" target="link">Transformer.js</a> |
<a href = "https://huggingface.co/cisco-ai/mini-bart-g2p" style="font-size: 9px" target="link">mini-bart-g2p</a> |
<a href = "https://onnxruntime.ai/docs/get-started/with-javascript/web.html" style="font-size: 9px" target="link">ONNXRuntime-Web</a> |
<a href = "https://github.com/akjava/English-To-IPA-Collections" style="font-size: 9px" target="link">English-To-IPA-Collections</a> |
<a href ="https://huggingface.co/papers/2309.03199" style="font-size: 9px" target="link">Matcha-TTS Paper</a>
</div>
""")
    msg.submit(call_generate_text, [msg, chatbot], [msg, chatbot])
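
# Gradio only serves files from directories in allowed_paths; the browser-side TTS
# loads assets from ./models (see the JS hook above), so expose that directory here.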
import os

pwd = "/home/user/app/"  # os.getcwd()
path = os.path.join(pwd, "models")
print(path)

demo.launch(allowed_paths=[path])