"""Gradio app: caption an uploaded image with BLIP, then narrate the
caption aloud with a VITS text-to-speech model."""

import torch
import gradio as gr
from PIL import Image
import scipy.io.wavfile as wavfile
from transformers import pipeline

# Run the models on the GPU when one is available, otherwise on the CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
|
# Image-captioning pipeline: BLIP generates a text description of an image.
caption_image = pipeline("image-to-text",
                         model="Salesforce/blip-image-captioning-large",
                         device=device)

# Text-to-speech pipeline: VITS converts the caption into a spoken waveform.
narrator = pipeline("text-to-speech",
                    model="kakao-enterprise/vits-ljs",
                    device=device)
|
|
def generate_audio(text):
    # Synthesize speech for the given text; the pipeline returns a dict
    # holding the waveform and its sampling rate.
    narrated_text = narrator(text)

    # Write the waveform to disk so Gradio can play it back.
    wavfile.write("output.wav", rate=narrated_text["sampling_rate"],
                  data=narrated_text["audio"][0])

    return "output.wav"
|
|
def caption_my_image(pil_image: Image.Image):
    # Caption the image, then narrate the caption.
    semantics = caption_image(images=pil_image)[0]["generated_text"]
    audio_path = generate_audio(semantics)
    return semantics, audio_path
|
|
demo = gr.Interface(
    fn=caption_my_image,
    inputs=[gr.Image(label="Select Image", type="pil")],
    outputs=[
        gr.Textbox(label="Generated Caption"),
        gr.Audio(label="Caption Narration"),
    ],
    title="Story Generation From Images",
    description="Upload an image to generate a caption and hear it narrated aloud.",
)
|
demo.launch() |
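
# Note: demo.launch() serves the app locally. Passing share=True instead
# (i.e. demo.launch(share=True), a standard Gradio option) asks Gradio to
# create a temporary public link, which is handy in hosted notebooks.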