Sayiqa7 committed · Commit 6683fa5 · verified · 1 Parent(s): 3f8cb82

Create app.py

Files changed (1):
  app.py +145 -0
app.py ADDED
@@ -0,0 +1,145 @@
+ import subprocess
+ import sys
+ # Install required libraries via the current interpreter's pip
+ # (gradio is added here: it is imported below but was never installed)
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "torch>=1.11.0"])
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "transformers>=4.31.0"])
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "diffusers>=0.14.0"])
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "librosa"])
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "accelerate>=0.20.1"])
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "gradio>=3.35.2"])
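+ # Note: on a Hugging Face Space these pins would normally live in
+ # requirements.txt; installing at runtime works but slows every cold start.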
+
+ import os
+ import numpy as np
+ import librosa
+ import torch
+ import gradio as gr
+ from functools import lru_cache
+ from transformers import pipeline
+ from huggingface_hub import login
+ from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
+
+ # Safety net: install anything still missing (note this runs after the imports
+ # above, so a truly missing package would already have raised ImportError there)
+ def install_missing_packages():
+     required_packages = {
+         "librosa": None,
+         "diffusers": ">=0.14.0",
+         "gradio": ">=3.35.2",
+         "huggingface_hub": None,
+         "accelerate": ">=0.20.1",
+         "transformers": ">=4.31.0"
+     }
+     for package, version in required_packages.items():
+         try:
+             __import__(package)
+         except ImportError:
+             package_name = f"{package}{version}" if version else package
+             subprocess.check_call([sys.executable, "-m", "pip", "install", package_name])
+
+ install_missing_packages()
+
+ # Get Hugging Face token for authentication
+ hf_token = os.getenv("HF_TOKEN")
+ if hf_token:
+     login(hf_token)
+ else:
+     raise ValueError("HF_TOKEN environment variable not set.")
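+ # (On a Space, set HF_TOKEN under Settings -> Variables and secrets; locally,
+ # export it in the shell before launching.)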
+
+ # Load speech-to-text model (Whisper)
+ speech_to_text = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")
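+ # whisper-tiny is the smallest, fastest Whisper checkpoint; swapping in
+ # openai/whisper-base or whisper-small trades latency for accuracy.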
+
+ # Load Stable Diffusion model for text-to-image
+ text_to_image = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ text_to_image.to(device)
+ text_to_image.enable_attention_slicing()
+ text_to_image.safety_checker = None
+ text_to_image.scheduler = DPMSolverMultistepScheduler.from_config(text_to_image.scheduler.config)
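+ # Disabling the safety checker skips NSFW filtering (and its occasional
+ # black-image false positives); DPM-Solver++ converges in roughly 20-25
+ # steps versus ~50 for the default scheduler.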
+
+ # Preprocess audio file into a mono NumPy array
+ def preprocess_audio(audio_path):
+     try:
+         audio, _ = librosa.load(audio_path, sr=16000)  # Resample to 16 kHz, Whisper's expected rate
+         return np.array(audio, dtype=np.float32)
+     except Exception as e:
+         return f"Error in preprocessing audio: {str(e)}"
+
+ # Speech-to-text function
+ @lru_cache(maxsize=10)
+ def transcribe_audio(audio_path):
+     try:
+         audio_array = preprocess_audio(audio_path)
+         if isinstance(audio_array, str):  # Error message from preprocessing
+             return audio_array
+         result = speech_to_text(audio_array)
+         return result["text"]
+     except Exception as e:
+         return f"Error in transcription: {str(e)}"
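+ # lru_cache keys on the file path string; Gradio writes each upload to a
+ # unique temp path, so stale cache hits are unlikely in practice.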
+
+ # Text-to-image function
+ @lru_cache(maxsize=10)
+ def generate_image_from_text(text):
+     try:
+         # Small canvas and a reduced step count for speed (DPM-Solver++ copes
+         # well with ~25 steps)
+         image = text_to_image(text, height=256, width=256, num_inference_steps=25).images[0]
+         return image
+     except Exception as e:
+         return f"Error in image generation: {str(e)}"
+
+ # Combined processing: image generation depends on the finished transcription,
+ # so the two steps must run sequentially (a parallel thread split would just
+ # race on the transcription result and see None).
+ def process_audio_and_generate_image(audio_path):
+     transcription = transcribe_audio(audio_path)
+     if "Error" in transcription:
+         return None, transcription
+
+     image = generate_image_from_text(transcription)
+     if isinstance(image, str) and "Error" in image:
+         return None, image
+
+     return image, transcription
+
+ # Gradio interface for speech-to-text
+ speech_to_text_iface = gr.Interface(
+     fn=transcribe_audio,
+     inputs=gr.Audio(type="filepath", label="Upload audio file for transcription (WAV/MP3)"),
+     outputs=gr.Textbox(label="Transcription"),
+     title="Speech-to-Text Transcription",
+     description="Upload an audio file to transcribe speech into text.",
+ )
+
+ # Gradio interface for voice-to-image
+ voice_to_image_iface = gr.Interface(
+     fn=process_audio_and_generate_image,
+     inputs=gr.Audio(type="filepath", label="Upload audio file (WAV/MP3)"),
+     outputs=[gr.Image(label="Generated Image"), gr.Textbox(label="Transcription")],
+     title="Voice-to-Image Generator",
+     description="Upload an audio file to transcribe speech to text, and then generate an image based on the transcription.",
+ )
+
+ # Combined Gradio app
+ iface = gr.TabbedInterface(
+     interface_list=[speech_to_text_iface, voice_to_image_iface],
+     tab_names=["Speech-to-Text", "Voice-to-Image"]
+ )
+
+ # Launch Gradio interface (share=True only matters for local runs; a Space
+ # already serves the app publicly)
+ iface.launch(debug=True, share=True)
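Once the app is up, its endpoints can be driven from another process with gradio_client. A minimal sketch, assuming the default local URL, a placeholder sample.wav, and that the transcription endpoint is exposed as /predict (the exact api_name is listed in the app's "Use via API" footer; recent gradio_client versions also require wrapping file arguments in handle_file):

    from gradio_client import Client, handle_file

    client = Client("http://127.0.0.1:7860")  # URL printed by iface.launch()
    text = client.predict(handle_file("sample.wav"), api_name="/predict")  # placeholder audio file
    print(text)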