preSalesAIAutomation committed on
Commit d38c396 · verified · 1 Parent(s): cdbcd1b

Update app.py

Files changed (1): app.py +90 -68
app.py CHANGED
@@ -26,48 +26,33 @@ pipe.to("cuda")
 pipe_upsample.to("cuda")
 pipe.vae.enable_tiling()
 
-def prepare_image_condition(image, size=(512, 512), background=(0, 0, 0)):
+def prepare_image_condition(image, size=(480, 480), background=(0, 0, 0)):
     image = ImageOps.contain(image, size)
     canvas = Image.new("RGB", size, background)
     offset = ((size[0] - image.width) // 2, (size[1] - image.height) // 2)
     canvas.paste(image, offset)
     return canvas
 
-def round_to_nearest_resolution(height, width, ratio):
-    return height - (height % ratio), width - (width % ratio)
-
 @spaces.GPU(duration=180)
 def generate_video(prompt, image_url):
     generator = torch.Generator("cuda").manual_seed(42)
-
-    # Aspect-ratio preserving image prep
+
+    # Load & prepare image
     image = None
     if image_url:
         raw_image = Image.open(BytesIO(requests.get(image_url).content)).convert("RGB")
         image = prepare_image_condition(raw_image)
-
-    # Dimensions
-    base_width, base_height = 512, 512
-    downscale = 2 / 3
-    # Use correct rounding for VAE compatibility
-    w_d, h_d = round_to_nearest_resolution(
-        int(base_width * downscale),
-        int(base_height * downscale),
-        ratio=pipe.vae_spatial_compression_ratio
-    )
-    # Upscaled dimensions must also be VAE-aligned
-    w_up, h_up = round_to_nearest_resolution(
-        base_width,
-        base_height,
-        ratio=pipe.vae_spatial_compression_ratio
-    )
-
-    # Step 1: Generate latents
+
+    # Set target resolutions
+    base_width, base_height = 480, 480  # final size (must be divisible by 16)
+    down_width, down_height = 320, 320  # for latent generation (must also be divisible by 16)
+
+    # Step 1: Generate latents at lower resolution
     latents = pipe(
         prompt=prompt,
         image=image,
-        width=w_d,
-        height=h_d,
+        width=down_width,
+        height=down_height,
         num_frames=60,
         num_inference_steps=7,
         output_type="latent",
@@ -76,61 +61,98 @@ def generate_video(prompt, image_url):
         decode_noise_scale=0.025,
         generator=generator
     ).frames
-
+
     torch.cuda.empty_cache()
     gc.collect()
-
-    # Step 2: Upscale
-    upscaled = pipe_upsample(latents=latents, output_type="latent").frames
-
+
+    # Step 2: Upscale latents
+    upscaled_latents = pipe_upsample(latents=latents, output_type="latent").frames
+
     torch.cuda.empty_cache()
     gc.collect()
-
-    # Step 3: Decode to frames (must match rounded base)
-    frames = pipe(
-        prompt=prompt,
-        image=image,
-        latents=upscaled,
-        width=w_up,
-        height=h_up,
-        num_frames=60,
-        num_inference_steps=10,
-        output_type="pil",
-        guidance_scale=1.0,
-        decode_timestep=0.05,
-        decode_noise_scale=0.025,
-        image_cond_noise_scale=0.025,
-        denoise_strength=0.3,
-        generator=generator
-    ).frames[0]
-
+
+    # Step 3: Decode upscaled latents to frames
+    # Use the VAE decoder directly instead of the full pipeline
+    frames = pipe.vae.decode(upscaled_latents).sample
+    frames = (frames / 2 + 0.5).clamp(0, 1)  # Normalize to [0, 1]
+    frames = (frames * 255).to(torch.uint8)  # Convert to uint8
+
+    # Convert tensor to PIL Images
+    pil_frames = []
+    for i in range(frames.shape[2]):  # num_frames dimension
+        frame = frames[0, :, i, :, :].permute(1, 2, 0).cpu().numpy()
+        pil_frames.append(Image.fromarray(frame))
+
+    torch.cuda.empty_cache()
+    gc.collect()
+
     # Step 4: Export video
     video_path = "output.mp4"
-    export_to_video(frames, video_path, fps=24)
-
+    export_to_video(pil_frames, video_path, fps=24)
+
     # Step 5: TTS
     tts = gTTS(text=prompt, lang='en')
     tts.save("voice.mp3")
     AudioSegment.from_mp3("voice.mp3").export("voice.wav", format="wav")
-
-    # Step 6: Subtitles (CPU)
+
+    # Step 6: Subtitles
     model = whisper.load_model("base", device="cpu")
     result = model.transcribe("voice.wav", task="transcribe", language="en")
+
+    # Generate SRT subtitles manually since result["srt"] might not be available
+    srt_content = ""
+    for i, segment in enumerate(result["segments"]):
+        start_time = format_time(segment["start"])
+        end_time = format_time(segment["end"])
+        text = segment["text"].strip()
+        srt_content += f"{i + 1}\n{start_time} --> {end_time}\n{text}\n\n"
+
     with open("subtitles.srt", "w", encoding="utf-8") as f:
-        f.write(result["srt"])
-
+        f.write(srt_content)
+
     # Step 7: Merge video + audio + subtitles
     final_output = "final_with_audio.mp4"
-    ffmpeg.input(video_path).output(
-        final_output,
-        vf="subtitles=subtitles.srt",
-        i="voice.mp3",
-        c="copy",
-        shortest=None,
-        loglevel="error"
-    ).run()
-
-    return final_output
+    try:
+        (
+            ffmpeg
+            .input(video_path)
+            .output(
+                final_output,
+                vf="subtitles=subtitles.srt",
+                **{"c:v": "libx264", "c:a": "aac"},
+                loglevel="error"
+            )
+            .run(overwrite_output=True)
+        )
+
+        # Add audio track
+        (
+            ffmpeg
+            .input(final_output)
+            .input("voice.wav")
+            .output(
+                "final_complete.mp4",
+                **{"c:v": "copy", "c:a": "aac"},
+                shortest=None,
+                loglevel="error"
+            )
+            .run(overwrite_output=True)
+        )
+
+        return "final_complete.mp4"
+
+    except Exception as e:
+        print(f"FFmpeg error: {e}")
+        # Fallback: return video without audio/subtitles
+        return video_path
+
+def format_time(seconds):
+    """Convert seconds to SRT time format"""
+    hours = int(seconds // 3600)
+    minutes = int((seconds % 3600) // 60)
+    secs = int(seconds % 60)
+    millisecs = int((seconds % 1) * 1000)
+    return f"{hours:02d}:{minutes:02d}:{secs:02d},{millisecs:03d}"
 
 # Gradio UI
 demo = gr.Interface(
@@ -141,7 +163,7 @@ demo = gr.Interface(
     ],
     outputs=gr.Video(label="Generated Video"),
     title="🎬 LTX AI Video Generator",
-    description="AI-powered video with voiceover and subtitles. Supports ZeroGPU (PyTorch) runtime."
+    description="AI-powered video with voiceover and subtitles. Now outputs at 480x480 resolution."
 )
 
-demo.launch()
+demo.launch()
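
Note on Step 3: diffusers' LTX pipelines normally denormalize (and, on recent releases, unpack) latents before VAE decoding, so calling pipe.vae.decode() directly on latents returned with output_type="latent" may not produce usable frames on every diffusers version. If the direct decode misbehaves, one fallback is the pattern the removed code used: pass the upscaled latents back through the pipeline and let it handle decoding. A minimal sketch, where decode_with_pipeline is a hypothetical helper and the parameter values are copied from the removed lines rather than verified against the current API:

def decode_with_pipeline(pipe, upscaled_latents, prompt, image, generator,
                         width=480, height=480, num_frames=60):
    # Hedged fallback for Step 3: reuse the pipeline so latent handling
    # (denormalization, unpacking, VAE decode) stays inside diffusers.
    # Parameter values mirror the removed Step 3 block of app.py.
    return pipe(
        prompt=prompt,
        image=image,
        latents=upscaled_latents,
        width=width,
        height=height,
        num_frames=num_frames,
        num_inference_steps=10,
        output_type="pil",
        guidance_scale=1.0,
        decode_timestep=0.05,
        decode_noise_scale=0.025,
        image_cond_noise_scale=0.025,
        denoise_strength=0.3,
        generator=generator,
    ).frames[0]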
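
Note on Step 7: ffmpeg-python builds its graph from module-level ffmpeg.input() calls and does not appear to expose .input() as a method on an existing stream, so the chained .input(final_output).input("voice.wav") call will likely raise and fall into the except branch. A minimal sketch of the conventional two-input pattern, assuming the same filenames app.py writes; mux_video_audio_subs is a hypothetical helper name, not part of the commit:

import ffmpeg

def mux_video_audio_subs(video_path, audio_path, srt_path, out_path):
    # Burn subtitles and mux the narration track in one ffmpeg pass.
    video_in = ffmpeg.input(video_path)
    audio_in = ffmpeg.input(audio_path)
    (
        ffmpeg
        .output(
            video_in.video,
            audio_in.audio,
            out_path,
            vf=f"subtitles={srt_path}",
            **{"c:v": "libx264", "c:a": "aac"},
            shortest=None,  # emits -shortest so the output ends with the shorter stream
            loglevel="error",
        )
        .run(overwrite_output=True)
    )
    return out_path

# Example with the filenames used in generate_video():
# mux_video_audio_subs("output.mp4", "voice.wav", "subtitles.srt", "final_complete.mp4")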