import gradio as gr
import torch
import pytube as pt
from transformers import pipeline
from huggingface_hub import model_info

# Speech-to-text: Whisper small, forced to transcribe in English.
transcribe_model_ckpt = "openai/whisper-small"
lang = "en"

transcribe_pipe = pipeline(
    task="automatic-speech-recognition",
    model=transcribe_model_ckpt,
    chunk_length_s=30,
)
transcribe_pipe.model.config.forced_decoder_ids = transcribe_pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")
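

# yt_transcribe below calls _return_yt_html_embed, which is not defined in this
# file; a minimal sketch of such a helper, assuming the video id follows "?v="
# in the URL.
def _return_yt_html_embed(yt_url):
    video_id = yt_url.split("?v=")[-1]
    return (
        f'<center><iframe width="500" height="320" '
        f'src="https://www.youtube.com/embed/{video_id}"></iframe></center>'
    )
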
def yt_transcribe(yt_url):
    # Download the audio-only stream of the video and transcribe it.
    yt = pt.YouTube(yt_url)
    html_embed_str = _return_yt_html_embed(yt_url)
    stream = yt.streams.filter(only_audio=True)[0]
    stream.download(filename="audio.mp3")
    text = transcribe_pipe("audio.mp3")["text"]
    return html_embed_str, text

# Extractive question answering over a text context.
qa_model_ckpt = "deepset/tinyroberta-squad2"
qa_pipe = pipeline("question-answering", model=qa_model_ckpt, tokenizer=qa_model_ckpt)

def get_answer(query, context):
    QA_input = {
        "question": query,
        "context": context,
    }
    res = qa_pipe(QA_input)["answer"]
    return res
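
# Sketch of how the two pipelines could be chained (assumed intent; not wired
# into the UI below): transcribe a video, then answer a question about it.
#   _, transcript = yt_transcribe("https://www.youtube.com/watch?v=...")
#   answer = get_answer("What is the video about?", transcript)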

def update(name):
    return f"Welcome to Gradio, {name}!"

# Minimal Blocks UI: greets the user on button click.
with gr.Blocks() as demo:
    gr.Markdown("Start typing below and then click **Run** to see the output.")
    with gr.Row():
        inp = gr.Textbox(placeholder="What is your name?")
        out = gr.Textbox()
    btn = gr.Button("Run")
    btn.click(fn=update, inputs=inp, outputs=out)

demo.launch()