Update src/streamlit_app.py
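This commit replaces the previous ViT-GPT2 captioner and the stable-audio-tools `generate_diffusion_cond` flow with a BLIP captioning pipeline and the diffusers `StableAudioPipeline`, adds cached model loading, in-memory WAV conversion, and session-state handling for the generated audio. All user-facing text is now in English.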
src/streamlit_app.py CHANGED (+148 -106)
@@ -1,119 +1,161 @@
 import streamlit as st
 import torch
-import
-from
-st.write("Loading the ViT-GPT2 model for image captioning...")
 try:
 except Exception as e:
-    st.error(f"
 try:
-        return
 except Exception as e:
-    st.error(f"
-    caption = vit_gpt2_tokenizer.decode(output_ids[0], skip_special_tokens=True)
-    return caption
-
-# Function to generate the soundscape
-def generate_soundscape(prompt_text):
-    sample_size = stable_audio_config["sample_size"]
-    sample_rate = stable_audio_config["sample_rate"]
-
-    # Generate audio
-    with st.spinner("Generating audio... (this may take a while)"):
-        output = generate_diffusion_cond(
-            stable_audio_model,
-            conditioning=conditioning,
-            sample_size=sample_size,
-            device=device,
-            steps=100,  # Number of diffusion steps (could be made configurable)
-            cfg_scale=7,  # Classifier-free guidance scale
-            sigma_min=0.03,
-            sigma_max=500,
-            sampler_type="dpmpp-3m-sde"  # Sampler type
-        )
-
-        # Rearrange the audio batch into a single sequence
-        output = rearrange(output, "b d n -> d (b n)")
-
-uploaded_file = st.file_uploader("
-
-caption = ""
 if uploaded_file is not None:
-    image = Image.open(uploaded_file).convert("RGB")
-    st.image(image, caption="
-    st.
-    )
 import streamlit as st
+from PIL import Image
+import io
+import soundfile as sf
+import numpy as np
 import torch
+from transformers import pipeline
+from diffusers import StableAudioPipeline
+
+# --- Configuration ---
+# Determine the optimal device for model inference:
+# prioritize CUDA (NVIDIA GPUs), then MPS (Apple Silicon), and fall back to CPU.
+DEVICE = "cuda" if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu")
+
+# Use float16 for reduced memory use and faster inference on GPU/MPS;
+# fall back to float32 on CPU for numerical stability.
+TORCH_DTYPE = torch.float16 if DEVICE in ["cuda", "mps"] else torch.float32
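One caveat on the device one-liner above: PyTorch builds older than 1.12 do not expose `torch.backends.mps`, so the expression would raise an `AttributeError` there. A defensive variant (a sketch, not part of this commit):

def pick_device() -> str:
    # Prefer CUDA, then MPS if this torch build exposes it, else CPU.
    if torch.cuda.is_available():
        return "cuda"
    mps_backend = getattr(torch.backends, "mps", None)
    if mps_backend is not None and mps_backend.is_available():
        return "mps"
    return "cpu"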
+# --- Cached Model Loading Functions ---
+@st.cache_resource(show_spinner="Loading Image Captioning Model (BLIP)...")
+def load_blip_model():
+    """
+    Loads the BLIP image captioning model via the Hugging Face transformers pipeline.
+    The model is cached so it is not reloaded on every Streamlit rerun.
+    """
     try:
+        captioner = pipeline(
+            "image-to-text",
+            model="Salesforce/blip-image-captioning-base",
+            torch_dtype=TORCH_DTYPE,
+            device=DEVICE
+        )
+        return captioner
     except Exception as e:
+        st.error(f"Failed to load BLIP model: {e}")
+        return None
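For orientation, the `image-to-text` pipeline returns a list with one dict per generated sequence, which is why the caption is read out of element 0 further down. A minimal usage sketch (the file name is hypothetical):

from PIL import Image

captioner = load_blip_model()
results = captioner(Image.open("landscape.jpg"))  # hypothetical input image
# results looks like: [{'generated_text': 'a lake surrounded by mountains'}]
print(results[0]["generated_text"])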
+@st.cache_resource(show_spinner="Loading Audio Generation Model (Stable Audio Open 1.0)...")
+def load_stable_audio_model():
+    """
+    Loads the Stable Audio Open 1.0 pipeline via Hugging Face diffusers.
+    The pipeline is cached so it is not reloaded on every Streamlit rerun.
+    """
     try:
+        audio_pipeline = StableAudioPipeline.from_pretrained(
+            "stabilityai/stable-audio-open-1.0",
+            torch_dtype=TORCH_DTYPE
+        ).to(DEVICE)
+        return audio_pipeline
     except Exception as e:
+        st.error(f"Failed to load Stable Audio model: {e}")
+        return None
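Two deployment notes: `stabilityai/stable-audio-open-1.0` is a gated checkpoint, so the Space will likely need an access token for an account that has accepted the model license; and on small GPUs the `.to(DEVICE)` move can run out of memory. A sketch of the usual diffusers alternative, assuming `accelerate` is installed:

audio_pipeline = StableAudioPipeline.from_pretrained(
    "stabilityai/stable-audio-open-1.0",
    torch_dtype=TORCH_DTYPE
)
# Keep weights on the CPU and move each submodule to the GPU only while it runs.
audio_pipeline.enable_model_cpu_offload()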
+# --- Audio Conversion Utility ---
+def convert_numpy_to_wav_bytes(audio_array: np.ndarray, sample_rate: int) -> bytes:
+    """
+    Converts a NumPy audio array to an in-memory WAV byte stream.
+    This avoids writing temporary files to disk, which is efficient and
+    suitable for ephemeral environments like Hugging Face Spaces.
+    """
+    byte_io = io.BytesIO()
+
+    # Stable Audio Open's diffusers output is (channels, frames), while
+    # soundfile expects (frames, channels) for multi-channel audio.
+    # Transpose 2-D (stereo) arrays to match soundfile's expectation.
+    if audio_array.ndim == 2 and audio_array.shape[0] == 2:  # (2, frames) = stereo
+        audio_array = audio_array.T  # -> (frames, 2)
+
+    # Write the array to the in-memory BytesIO object as a 32-bit float WAV
+    sf.write(byte_io, audio_array, sample_rate, format='WAV', subtype='FLOAT')
+
+    # Reset the stream position to the beginning before reading
+    byte_io.seek(0)
+    return byte_io.read()
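A quick usage sketch of the helper; the one-second 440 Hz test tone is illustrative only:

t = np.arange(44100) / 44100.0
tone = np.sin(2 * np.pi * 440.0 * t).astype(np.float32)  # mono test signal
wav_bytes = convert_numpy_to_wav_bytes(tone, 44100)
st.audio(wav_bytes, format="audio/wav")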
+# --- Streamlit App Layout ---
+st.set_page_config(layout="centered", page_title="Image-to-Soundscape Generator")
+st.title("🏞️ Image-to-Soundscape Generator 🎶")
+st.markdown("Upload a landscape image, and let AI transform it into a unique soundscape!")
+
+# Initialize session state so results persist across Streamlit reruns
+if "audio_bytes" not in st.session_state:
+    st.session_state.audio_bytes = None
+if "image_uploaded" not in st.session_state:
+    st.session_state.image_uploaded = False
+
+# --- UI Components ---
+uploaded_file = st.file_uploader("Choose a landscape image...", type=["jpg", "jpeg", "png"])
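The guards above use the standard Streamlit idiom: the script reruns top to bottom on every interaction, and `st.session_state` is the only place values survive those reruns. A minimal sketch of the pattern (the `counter` key is hypothetical):

if "counter" not in st.session_state:
    st.session_state.counter = 0      # runs only on the first execution
if st.button("Increment"):
    st.session_state.counter += 1     # survives the rerun the click triggers
st.write(st.session_state.counter)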
 if uploaded_file is not None:
+    st.session_state.image_uploaded = True
+    image = Image.open(uploaded_file).convert("RGB")  # Ensure the image is in RGB format
+    st.image(image, caption="Uploaded Image", use_column_width=True)
+
+    # Button to trigger the generation pipeline
+    if st.button("Generate Soundscape"):
+        st.session_state.audio_bytes = None  # Clear previous audio
+
+        with st.spinner("Generating soundscape... This may take a moment."):
+            try:
+                # 1. Load BLIP model and generate caption (hidden from user)
+                captioner = load_blip_model()
+                if captioner is None:
+                    st.error("Image captioning model could not be loaded. Please try again.")
+                    st.session_state.image_uploaded = False  # Reset to allow re-upload
+                    st.stop()
+
+                # Generate a caption; the BLIP pipeline accepts a PIL Image directly
+                # and returns a list with one dict per generated sequence.
+                caption_results = captioner(image)
+                generated_caption = caption_results[0]['generated_text']
+
+                # Prefix the caption to steer the audio model towards environmental sounds
+                soundscape_prompt = f"A soundscape of {generated_caption}"
+
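If the BLIP captions come out too long or get cut off mid-sentence, the pipeline call accepts a token cap; a hedged one-line tweak, not part of the commit:

caption_results = captioner(image, max_new_tokens=30)  # cap the caption length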
+                # 2. Load Stable Audio model and generate audio
+                audio_pipeline = load_stable_audio_model()
+                if audio_pipeline is None:
+                    st.error("Audio generation model could not be loaded. Please try again.")
+                    st.session_state.image_uploaded = False  # Reset to allow re-upload
+                    st.stop()
+
+                # Generate audio with parameters tuned for speed:
+                # num_inference_steps: lower is faster, higher gives better quality
+                # audio_end_in_s: shorter clips generate faster
+                # negative_prompt: steers generation away from low-quality output
+                audio_output = audio_pipeline(
+                    prompt=soundscape_prompt,
+                    num_inference_steps=50,  # tuned for faster generation
+                    audio_end_in_s=10.0,     # 10-second clip
+                    negative_prompt="low quality, average quality, distorted"
+                )
+
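Generation is stochastic, so the same prompt yields a different soundscape on each run. For reproducible output, diffusers pipelines accept a seeded generator (the seed value here is arbitrary; a CPU generator works regardless of the pipeline's device):

generator = torch.Generator(device="cpu").manual_seed(42)
audio_output = audio_pipeline(
    prompt=soundscape_prompt,
    num_inference_steps=50,
    audio_end_in_s=10.0,
    negative_prompt="low quality, average quality, distorted",
    generator=generator
)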
+                # .audios is a torch tensor of shape (batch, channels, frames);
+                # take the first waveform and convert it to a float32 NumPy array
+                # (soundfile cannot write float16) before WAV encoding.
+                audio_numpy_array = audio_output.audios[0].float().cpu().numpy()
+                sample_rate = audio_pipeline.vae.sampling_rate
+
+                # 3. Convert the NumPy array to WAV bytes and store in session state
+                st.session_state.audio_bytes = convert_numpy_to_wav_bytes(audio_numpy_array, sample_rate)
+
+                st.success("Soundscape generated successfully!")
+
+            except Exception as e:
+                st.error(f"An error occurred during generation: {e}")
+                st.session_state.audio_bytes = None  # Clear any partial audio
+                st.session_state.image_uploaded = False  # Reset to allow re-upload
+                st.exception(e)  # Display full traceback for debugging
+
+# Display the generated soundscape if available in session state
+if st.session_state.audio_bytes:
+    st.subheader("Generated Soundscape:")
+    st.audio(st.session_state.audio_bytes, format='audio/wav')
+    st.markdown("You can download the audio using the controls above.")
+
+# Reset button for a new image upload
+if st.session_state.image_uploaded and st.button("Upload New Image"):
+    st.session_state.audio_bytes = None
+    st.session_state.image_uploaded = False
+    st.rerun()  # Rerun the app to clear the file uploader
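Judging from the new imports, the Space's requirements.txt now needs streamlit, torch, transformers, diffusers, soundfile, numpy, and Pillow (plus accelerate if CPU offload is used); the old stable-audio-tools and einops (`rearrange`) dependencies are no longer required.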