Commit 6715980
Parent(s): e68d5ed
add link to homebrew and blog post
app.py CHANGED
@@ -81,26 +81,18 @@ def text_to_audio_file(text):
 
 
 @spaces.GPU
-def process_input(
-    # if input_type == "text":
-    #     audio_file = "temp_audio.wav"
+def process_input(audio_file=None):
 
     for partial_message in process_audio(audio_file):
         yield partial_message
 
-    # if input_type == "text":
-    #     os.remove(audio_file)
 
 @spaces.GPU
-def process_transcribe_input(
-    # if input_type == "text":
-    #     audio_file = "temp_audio.wav"
+def process_transcribe_input(audio_file=None):
 
     for partial_message in process_audio(audio_file, transcript=True):
         yield partial_message
 
-    # if input_type == "text":
-    #     os.remove(audio_file)
 class StopOnTokens(StoppingCriteria):
     def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
         # encode </s> token

@@ -167,7 +159,8 @@ examples.extend(bad_examples)
 with gr.Blocks() as iface:
     gr.Markdown("# Llama3.1-S: checkpoint Aug 19, 2024")
     gr.Markdown("Enter text to convert to audio, then submit the audio to generate text or Upload Audio")
-
+    gr.Markdown("Powered by [Homebrew Ltd](https://homebrew.ltd/) | [Read our blog post](https://homebrew.ltd/blog/llama3-just-got-ears)")
+
     with gr.Row():
         input_type = gr.Radio(["text", "audio"], label="Input Type", value="audio")
         text_input = gr.Textbox(label="Text Input", visible=False)

@@ -202,12 +195,12 @@ with gr.Blocks() as iface:
 
     submit_button.click(
         process_input,
-        inputs=[
+        inputs=[audio_input],
         outputs=[text_output]
     )
     transcrip_button.click(
        process_transcribe_input,
-        inputs=[
+        inputs=[audio_input],
         outputs=[text_output]
     )
 
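Taken together, the commit adds the Homebrew/blog links to the header, simplifies the two streaming handlers to take the audio file path directly, and wires them to the audio component. Below is a minimal, self-contained sketch of that wiring, assuming process_audio is a generator that yields progressively longer partial messages (its body is not part of this diff) and assuming audio_input is a gr.Audio filepath component; the @spaces.GPU decorator is omitted since it only applies on ZeroGPU Spaces. This illustrates the pattern, not the Space's full app.py.

```python
import gradio as gr

def process_audio(audio_file, transcript=False):
    # Placeholder for the real model-backed generator (not shown in this
    # diff); it is assumed to yield a growing partial message as decoding
    # proceeds, which Gradio streams into the output textbox.
    message = "Transcript: " if transcript else "Answer: "
    for word in ("hello", "world"):
        message += word + " "
        yield message

def process_input(audio_file=None):
    # Mirrors the updated handler: forward each partial result to the UI.
    for partial_message in process_audio(audio_file):
        yield partial_message

def process_transcribe_input(audio_file=None):
    for partial_message in process_audio(audio_file, transcript=True):
        yield partial_message

with gr.Blocks() as iface:
    gr.Markdown("Powered by [Homebrew Ltd](https://homebrew.ltd/) | [Read our blog post](https://homebrew.ltd/blog/llama3-just-got-ears)")
    audio_input = gr.Audio(type="filepath", label="Audio Input")  # assumed component type
    text_output = gr.Textbox(label="Output")
    submit_button = gr.Button("Submit")
    transcrip_button = gr.Button("Transcribe")

    # Because the handlers are generators, each yielded value replaces the
    # textbox contents, giving a streaming effect.
    submit_button.click(process_input, inputs=[audio_input], outputs=[text_output])
    transcrip_button.click(process_transcribe_input, inputs=[audio_input], outputs=[text_output])

if __name__ == "__main__":
    iface.launch()
```

With inputs=[audio_input], Gradio passes the uploaded or recorded file path in as audio_file, which is why the commented-out input_type / temp_audio.wav handling removed in this commit is no longer needed.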