Update app.py
app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 from transformers import BlipProcessor, BlipForConditionalGeneration
 from gtts import gTTS
-import pygame
+from IPython.display import Audio
 
 model_id = "dblasko/blip-dalle3-img2prompt"
 model = BlipForConditionalGeneration.from_pretrained(model_id)
@@ -22,10 +22,7 @@ def generate_caption(image):
     return generated_caption, audio_path
 
 def play_audio(audio_path):
-    pygame.mixer.init()
-    pygame.mixer.music.load(audio_path)
-    pygame.mixer.music.play()
-    pygame.event.wait()
+    return Audio(audio_path)
 
 # Create a Gradio interface with an image input, a textbox output, a button, and an audio player
 demo = gr.Interface(
@@ -34,7 +31,6 @@ demo = gr.Interface(
     outputs=[
         gr.Textbox(label="Generated caption"),
         gr.Button("Convert to Audio", play_audio),
-        gr.Audio("audio")
     ]
 )
 demo.launch()
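For reference, here is a minimal sketch of how this caption-to-speech flow is commonly wired in Gradio: the handler returns the caption text and the gTTS mp3 path, and a gr.Audio output component plays that file directly, so no separate playback helper is needed. The generate_caption body, the "caption.mp3" filename, and the component labels below are illustrative assumptions, not the Space's actual code.

# Minimal sketch (not the Space's exact app.py): caption an image with BLIP,
# synthesize speech with gTTS, and let Gradio's Audio component play the file.
import gradio as gr
from transformers import BlipProcessor, BlipForConditionalGeneration
from gtts import gTTS

model_id = "dblasko/blip-dalle3-img2prompt"
processor = BlipProcessor.from_pretrained(model_id)
model = BlipForConditionalGeneration.from_pretrained(model_id)

def generate_caption(image):
    # Run BLIP on the uploaded image to produce a caption
    inputs = processor(images=image, return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=50)
    caption = processor.decode(output_ids[0], skip_special_tokens=True)

    # Convert the caption to speech and save it as an mp3 file
    audio_path = "caption.mp3"  # assumed filename for illustration
    gTTS(caption).save(audio_path)
    return caption, audio_path

# gr.Audio accepts the returned file path, so no manual playback code is needed
demo = gr.Interface(
    fn=generate_caption,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Textbox(label="Generated caption"), gr.Audio(label="Spoken caption")],
)
demo.launch()

Returning a file path to gr.Audio keeps playback in the visitor's browser; pygame plays audio on the server process and IPython.display.Audio only renders in notebooks, which is why neither is typically used inside a Space.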