Nepjune committed on
Commit
72cd368
·
verified ·
1 Parent(s): 7bb2720

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -26
app.py CHANGED
@@ -1,37 +1,33 @@
1
  import gradio as gr
2
- from transformers import BlipProcessor, BlipForConditionalGeneration
3
- from concurrent.futures import ThreadPoolExecutor
4
- import pyttsx3
 
5
 
6
- model_id = "dblasko/blip-dalle3-img2prompt"
7
- model = BlipForConditionalGeneration.from_pretrained(model_id)
8
- processor = BlipProcessor.from_pretrained(model_id)
 
9
 
10
- # Initialize Text-to-Speech engine
11
- tts_engine = pyttsx3.init()
 
 
 
 
 
12
 
13
- def generate_caption(image):
14
- # Generate caption from image
15
- inputs = processor(images=image, return_tensors="pt")
16
- pixel_values = inputs.pixel_values
17
- generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
18
- generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True, temperature=0.8, top_k=40, top_p=0.9)[0]
19
 
20
- # Convert the generated caption to speech
21
- tts_engine.save_to_file(generated_caption, "generated_audio.mp3")
22
- tts_engine.runAndWait()
23
-
24
- return generated_caption, "generated_audio.mp3"
25
-
26
- # Create a Gradio interface with an image input, a textbox output, a button, and an audio player
27
  demo = gr.Interface(
28
- fn=generate_caption,
29
- inputs=gr.Image(),
30
  outputs=[
31
- gr.Textbox(label="Generated caption"),
32
- gr.Button("Convert to Audio", None),
33
  ],
34
  live=True # ทำให้ Gradio ทำงานแบบไม่บล็อก
35
  )
36
  demo.launch(share=True)
37
-
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
+ import torchaudio
4
+ from torchaudio.transforms import Resample
5
+ import torch
6
 
7
+ # สร้างโมเดล TTS
8
+ model_name = "facebook/tts-crdnn-baker-softmax"
9
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
10
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
11
 
12
+ # ฟังก์ชันสำหรับแปลงข้อความเป็นเสียง
13
+ def text_to_speech(text, output_path="generated_audio.wav"):
14
+ input_ids = tokenizer.encode(text, return_tensors="pt", max_length=150, truncation=True)
15
+ with torch.no_grad():
16
+ audio = model.generate(input_ids)
17
+ waveform = torchaudio.transforms.Resample(48_000, 24_000)(audio.squeeze().numpy())
18
+ torchaudio.save(output_path, waveform, 24_000)
19
 
20
+ def play_audio(audio_path):
21
+ gr.audio(audio_path, type="player")
 
 
 
 
22
 
23
+ # สร้าง Gradio interface ที่ใช้ image input, textbox output, button และ audio player
 
 
 
 
 
 
24
  demo = gr.Interface(
25
+ fn=text_to_speech,
26
+ inputs=gr.Textbox(label="Enter Text"),
27
  outputs=[
28
+ gr.Audio("audio", type="player"),
29
+ gr.Button("Convert to Audio", play_audio),
30
  ],
31
  live=True # ทำให้ Gradio ทำงานแบบไม่บล็อก
32
  )
33
  demo.launch(share=True)