import torch
import gradio as gr
from PIL import Image
import scipy.io.wavfile as wavfile

from transformers import pipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Image-captioning pipeline (BLIP large) and text-to-speech pipeline (VITS).
caption_image = pipeline("image-to-text",
                         model="Salesforce/blip-image-captioning-large", device=device)

narrator = pipeline("text-to-speech",
                    model="kakao-enterprise/vits-ljs")

# To use locally downloaded copies of the models instead, point the pipelines
# at their directories, e.g.:
# caption_image = pipeline("image-to-text", model=model_path, device=device)
# narrator = pipeline("text-to-speech", model=tts_model_path)

def generate_audio(text):
    # Convert the caption text to speech.
    narrated_text = narrator(text)

    # Save the generated waveform to a WAV file that Gradio can play back.
    wavfile.write("output.wav", rate=narrated_text["sampling_rate"],
                  data=narrated_text["audio"][0])

    return "output.wav"

def caption_my_image(pil_image):
    # Generate a text caption for the image, then narrate it.
    semantics = caption_image(images=pil_image)[0]['generated_text']
    return generate_audio(semantics)


demo = gr.Interface(fn=caption_my_image,
                    inputs=[gr.Image(label="Select Image", type="pil")],
                    outputs=[gr.Audio(label="Image Caption")],
                    title="@GenAILearniverse Project 8: Image Captioning",
                    description="Generates a spoken caption for the uploaded image.")
demo.launch()