upd: use whisper finetuned ru
app.py CHANGED
@@ -21,11 +21,9 @@ model_ru2en = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-ru-en"
 tokenizer_en2ru = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-ru")
 model_en2ru = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-ru")

-
-transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
+transcriber = pipeline("automatic-speech-recognition", model="artyomboyko/whisper-base-fine_tuned-ru")


-# Functions for translation
 def translate_ru2en(text):
     inputs = tokenizer_ru2en(text, return_tensors="pt")
     outputs = model_ru2en.generate(**inputs)
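This hunk swaps the English-only openai/whisper-base.en checkpoint for a Whisper base model fine-tuned on Russian, presumably so the microphone input can be transcribed in Russian before translation. A minimal sketch of how such a pipeline is invoked on raw audio, using the same dict format as the transcribe() call in the last hunk; the dummy buffer and the 16 kHz rate are placeholders, not part of app.py:

import numpy as np
from transformers import pipeline

# Russian fine-tune of Whisper base, as introduced by this commit
transcriber = pipeline(
    "automatic-speech-recognition",
    model="artyomboyko/whisper-base-fine_tuned-ru",
)

# The ASR pipeline accepts raw float32 samples plus their sampling rate
sr = 16000                              # Whisper checkpoints expect 16 kHz audio
audio = np.zeros(sr, dtype=np.float32)  # placeholder: one second of silence
print(transcriber({"sampling_rate": sr, "raw": audio})["text"])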
@@ -40,7 +38,6 @@ def translate_en2ru(text):
     return translated_text


-# Function to generate answers
 def generate_answer_git(image, question):
     with torch.no_grad():
         encoding = processor(
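This hunk only drops the "# Function to generate answers" comment; its context lines belong to the MarianMT translation helpers and the image question-answering helper (generate_answer_git). For reference, a sketch of what the full ru→en helper typically looks like with these checkpoints; the decode line is an assumption, since the diff does not show the middle of the function:

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Checkpoints matching the context lines of the diff
tokenizer_ru2en = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-ru-en")
model_ru2en = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-ru-en")

def translate_ru2en(text):
    # Tokenize the Russian input and generate an English translation
    inputs = tokenizer_ru2en(text, return_tensors="pt")
    outputs = model_ru2en.generate(**inputs)
    # Assumed decoding step: strip special tokens from the generated ids
    translated_text = tokenizer_ru2en.decode(outputs[0], skip_special_tokens=True)
    return translated_text

print(translate_ru2en("Привет, мир!"))  # -> something like "Hello, world!"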
@@ -90,7 +87,6 @@ def transcribe(stream, new_chunk):
     return stream, transcriber({"sampling_rate": sr, "raw": stream})["text"]


-# Gradio Interface
 interface = gr.Interface(
     fn=generate_answer,
     inputs=[
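The last hunk again removes only a comment; the context shows the tail of the streaming transcribe() callback and the head of the gr.Interface call (whose fn is generate_answer, not shown here). As a rough illustration of how a streaming transcriber is usually wired up in Gradio, assuming a recent Gradio 4.x API; the normalization step and the interface arguments below are guesses for illustration, not the app's actual code:

import gradio as gr
import numpy as np
from transformers import pipeline

transcriber = pipeline("automatic-speech-recognition", model="artyomboyko/whisper-base-fine_tuned-ru")

def transcribe(stream, new_chunk):
    # Accumulate microphone chunks into one growing buffer
    sr, y = new_chunk
    y = y.astype(np.float32)
    peak = np.max(np.abs(y))
    if peak > 0:
        y /= peak  # normalize samples to [-1, 1]
    stream = np.concatenate([stream, y]) if stream is not None else y
    # Re-run ASR over the whole buffer, as in the return line above
    return stream, transcriber({"sampling_rate": sr, "raw": stream})["text"]

demo = gr.Interface(
    fn=transcribe,
    inputs=["state", gr.Audio(sources=["microphone"], streaming=True)],
    outputs=["state", "text"],
    live=True,
)
demo.launch()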