openfree committed (verified)
Commit 0329637 · 1 Parent(s): ff37225

Update sonic.py

Files changed (1):
  1. sonic.py +184 -139
sonic.py CHANGED
@@ -1,109 +1,122 @@
-"""
-sonic.py – 2025-05 hot-fix
-Main fixes
-  • check that config.pretrained_model_name_or_path is an actual local folder
-  • if it is missing, download it automatically via huggingface_hub.snapshot_download
-  • load the models once the path is ready
-"""
-import os, math, torch, cv2
 from PIL import Image
 from omegaconf import OmegaConf
-from tqdm.auto import tqdm
-from diffusers import AutoencoderKLTemporalDecoder, EulerDiscreteScheduler
 from transformers import WhisperModel, CLIPVisionModelWithProjection, AutoFeatureExtractor
-from huggingface_hub import snapshot_download, hf_hub_download
 from src.utils.util import save_videos_grid, seed_everything
 from src.dataset.test_preprocess import process_bbox, image_audio_to_tensor
-from src.models.base.unet_spatio_temporal_condition import UNetSpatioTemporalConditionModel, add_ip_adapters
 from src.pipelines.pipeline_sonic import SonicPipeline
 from src.models.audio_adapter.audio_proj import AudioProjModel
 from src.models.audio_adapter.audio_to_bucket import Audio2bucketModel
 from src.utils.RIFE.RIFE_HDv3 import RIFEModel
 from src.dataset.face_align.align import AlignImage
-# ------------------------------
 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-HF_STABLE_REPO   = "stabilityai/stable-video-diffusion-img2vid-xt"
-LOCAL_STABLE_DIR = os.path.join(BASE_DIR, "checkpoints", "stable-video-diffusion-img2vid-xt")


-# ------------------------------------------------------------------
-# single image + speech → video tensor
-# ------------------------------------------------------------------
-def test(pipe, cfg, wav_enc, audio_pe, audio2bucket, img_enc,
          width, height, batch):

-    # --- align batch dimensions --------------------------------------
     for k, v in batch.items():
         if isinstance(v, torch.Tensor):
-            batch[k] = v.unsqueeze(0).float().to(pipe.device)

-    ref_img   = batch['ref_img']
-    clip_img  = batch['clip_images']
-    face_mask = batch['face_mask']
-    img_emb   = img_enc(clip_img).image_embeds            # (1,1024)

-    audio_feat = batch['audio_feature']                   # (1,80,T)
-    audio_len  = int(batch['audio_len'])
-    step       = max(1, int(cfg.step))                    # safety clamp

-    window = 16_000                                       # 1-second chunk
-    prompt_list, last_list = [], []

-    for i in range(0, audio_feat.shape[-1], window):
-        chunk = audio_feat[:, :, i:i+window]
-        hs    = wav_enc.encoder(chunk, output_hidden_states=True).hidden_states
-        prompt_list.append(torch.stack(hs, 2))            # (1,80,L,384)
-        last = wav_enc.encoder(chunk).last_hidden_state.unsqueeze(-2)
-        last_list.append(last)                            # (1,80,1,384)

-    if not prompt_list:
-        raise ValueError("No speech recognised in audio.")

-    audio_prompts = torch.cat(prompt_list, 1)             # (1,80,*L,384)
-    last_prompts  = torch.cat(last_list, 1)               # (1,80,*1,384)

-    # padding rule (same as the original paper)
-    audio_prompts = torch.cat([torch.zeros_like(audio_prompts[:, :4]),
-                               audio_prompts,
-                               torch.zeros_like(audio_prompts[:, :6])], 1)
-    last_prompts  = torch.cat([torch.zeros_like(last_prompts[:, :24]),
-                               last_prompts,
-                               torch.zeros_like(last_prompts[:, :26])], 1)

-    # --------------------------------------------------------------
-    total_tok = audio_prompts.shape[1]
-    n_chunks  = max(1, math.ceil(total_tok / (2*step)))

-    ref_L, aud_L, uncond_L, buckets = [], [], [], []

-    for i in tqdm(range(n_chunks), ncols=0):
         st = i * 2 * step

-        # conditioning audio tokens (pad → 10×5×384)
-        cond = audio_prompts[:, st:st+10]                 # (1,80,10,384) → (1,10,8,384)?
-        cond = cond[:, :10]                               # f = 10
-        cond = cond.permute(0, 2, 1, 3)                   # (1,10,80,384)
-        cond = cond.reshape(1, 10, 10, 5, 384)            # ★ w=10, b=5 (zero-pad auto)
-        # tokens for bucket estimation
-        buck = last_prompts[:, st:st+50]                  # (1,80,50,384)
-        if buck.shape[1] < 50:
-            pad  = torch.zeros(1, 50-buck.shape[1], *buck.shape[2:], device=buck.device)
-            buck = torch.cat([buck, pad], 1)
-        buck = buck[:, :50].permute(0, 2, 1, 3).reshape(1, 50, 10, 5, 384)
-
-        motion = audio2bucket(buck, img_emb) * 16 + 16
-
-        ref_L.append(ref_img[0])
-        aud_L.append(audio_pe(cond).squeeze(0))           # (10,1024)
-        uncond_L.append(audio_pe(torch.zeros_like(cond)).squeeze(0))
         buckets.append(motion[0])

-    # -------------- diffusion --------------------------------------
-    vid = pipe(
         ref_img, clip_img, face_mask,
-        aud_L, uncond_L, buckets,
         height=height, width=width,
-        num_frames=len(aud_L),
         decode_chunk_size=cfg.decode_chunk_size,
         motion_bucket_scale=cfg.motion_bucket_scale,
         fps=cfg.fps,
@@ -119,13 +132,12 @@ def test(pipe, cfg, wav_enc, audio_pe, audio2bucket, img_enc,
         i2i_noise_strength=cfg.i2i_noise_strength,
     ).frames

-    return (vid*0.5+0.5).clamp(0,1).to(pipe.device).unsqueeze(0).cpu()
-

-# ------------------------------------------------------------------
-# Sonic wrapper
-# ------------------------------------------------------------------

 class Sonic:
     config_file = os.path.join(BASE_DIR, "config/inference/sonic.yaml")
     config = OmegaConf.load(config_file)
@@ -135,92 +147,125 @@ class Sonic:
         cfg.use_interframe = enable_interpolate_frame
         self.device = f"cuda:{device_id}" if torch.cuda.is_available() and device_id >= 0 else "cpu"

-        # ----------- ✨ [NEW] ensure the pretrained model folder exists ----------
-        if not os.path.isdir(LOCAL_STABLE_DIR) or not os.path.isfile(os.path.join(LOCAL_STABLE_DIR, "vae", "config.json")):
-            print("[INFO] 1st-run downloading base model (~2 GB)…")
-            snapshot_download(repo_id=HF_STABLE_REPO,
-                              local_dir=LOCAL_STABLE_DIR,
-                              resume_download=True,
-                              local_dir_use_symlinks=False)
-        cfg.pretrained_model_name_or_path = LOCAL_STABLE_DIR
-        # ------------------------------------------------------------------
-
-        self._load_models(cfg)
-        print("Sonic init done")


-    # model-loader (unchanged, but with tiny clean-ups) ------------------------
-    def _load_models(self, cfg):
         dtype = {"fp16": torch.float16, "fp32": torch.float32, "bf16": torch.bfloat16}[cfg.weight_dtype]

         vae   = AutoencoderKLTemporalDecoder.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="vae", variant="fp16")
         sched = EulerDiscreteScheduler.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="scheduler")
-        img_enc = CLIPVisionModelWithProjection.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="image_encoder", variant="fp16")
         unet  = UNetSpatioTemporalConditionModel.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="unet", variant="fp16")
         add_ip_adapters(unet, [32], [cfg.ip_audio_scale])

-        self.audio2token  = AudioProjModel(10, 5, 384, 1024, 1024, 32).to(self.device)
-        self.audio2bucket = Audio2bucketModel(50, 1, 384, 1024, 1024, 1, 2).to(self.device)
-
-        unet.load_state_dict(torch.load(os.path.join(BASE_DIR, cfg.unet_checkpoint_path), map_location="cpu"))
-        self.audio2token.load_state_dict(torch.load(os.path.join(BASE_DIR, cfg.audio2token_checkpoint_path), map_location="cpu"))
-        self.audio2bucket.load_state_dict(torch.load(os.path.join(BASE_DIR, cfg.audio2bucket_checkpoint_path), map_location="cpu"))
-
-        self.whisper = WhisperModel.from_pretrained(os.path.join(BASE_DIR, "checkpoints/whisper-tiny")).to(self.device).eval()
-        self.whisper.requires_grad_(False)
-
-        self.feature_extractor = AutoFeatureExtractor.from_pretrained(os.path.join(BASE_DIR, "checkpoints/whisper-tiny"))
         self.face_det = AlignImage(self.device, det_path=os.path.join(BASE_DIR, "checkpoints/yoloface_v5m.pt"))
         if cfg.use_interframe:
-            self.rife = RIFEModel(device=self.device); self.rife.load_model(os.path.join(BASE_DIR, "checkpoints/RIFE/"))

-        for m in (img_enc, vae, unet): m.to(dtype)
-        self.pipe = SonicPipeline(unet=unet, image_encoder=img_enc, vae=vae, scheduler=sched).to(device=self.device, dtype=dtype)
-        self.image_encoder = img_enc

-    # ------------------------------------------------------------------
-    def preprocess(self, img_path, expand_ratio=1.0):
-        img = cv2.imread(img_path)
-        _, _, boxes = self.face_det(img, maxface=True)
-        if boxes:
-            x, y, w, h = boxes[0]; return {"face_num": 1, "crop_bbox": process_bbox((x, y, x+w, y+h), expand_ratio, *img.shape[:2])}
-        return {"face_num": 0, "crop_bbox": None}

-    # ------------------------------------------------------------------
     @torch.no_grad()
-    def process(self, img_path, wav_path, out_path,
-                min_resolution=512, inference_steps=25,
-                dynamic_scale=1.0, keep_resolution=False, seed=None):

         cfg = self.config
-        if seed is not None: cfg.seed = seed
         cfg.num_inference_steps = inference_steps
         cfg.motion_bucket_scale = dynamic_scale
         seed_everything(cfg.seed)

-        sample = image_audio_to_tensor(
             self.face_det, self.feature_extractor,
-            img_path, wav_path, limit=-1,
-            image_size=min_resolution, area=cfg.area,
         )
-        if sample is None: return -1

-        h, w = sample['ref_img'].shape[-2:]
-        resolution = (f"{Image.open(img_path).width//2*2}x{Image.open(img_path).height//2*2}"
-                      if keep_resolution else f"{w}x{h}")

         video = test(self.pipe, cfg, self.whisper, self.audio2token,
-                     self.audio2bucket, self.image_encoder, w, h, sample)

-        if cfg.use_interframe:                      # RIFE interpolation
-            out = video.to(self.device); frames = []
             for i in tqdm(range(out.shape[2]-1), ncols=0):
-                mid = self.rife.inference(out[:,:,i], out[:,:,i+1]).clamp(0,1)
-                frames += [out[:,:,i], mid]
-            frames.append(out[:,:,-1]); video = torch.stack(frames, 2).cpu()
-
-        tmp = out_path.replace(".mp4", "_noaudio.mp4")
-        save_videos_grid(video, tmp, n_rows=video.shape[0], fps=cfg.fps*(2 if cfg.use_interframe else 1))
-        os.system(f"ffmpeg -i '{tmp}' -i '{wav_path}' -s {resolution} "
-                  f"-vcodec libx264 -acodec aac -crf 18 -shortest '{out_path}' -y -loglevel error")
-        os.remove(tmp); return 0
+# sonic.py (full file)
+
+import os, math, glob, torch, cv2
 from PIL import Image
 from omegaconf import OmegaConf
+from tqdm import tqdm
+
+from diffusers import AutoencoderKLTemporalDecoder
+from diffusers.schedulers import EulerDiscreteScheduler
 from transformers import WhisperModel, CLIPVisionModelWithProjection, AutoFeatureExtractor
+
 from src.utils.util import save_videos_grid, seed_everything
 from src.dataset.test_preprocess import process_bbox, image_audio_to_tensor
+from src.models.base.unet_spatio_temporal_condition import (
+    UNetSpatioTemporalConditionModel, add_ip_adapters,
+)
 from src.pipelines.pipeline_sonic import SonicPipeline
 from src.models.audio_adapter.audio_proj import AudioProjModel
 from src.models.audio_adapter.audio_to_bucket import Audio2bucketModel
 from src.utils.RIFE.RIFE_HDv3 import RIFEModel
 from src.dataset.face_align.align import AlignImage
+
+try:
+    from safetensors.torch import load_file as safe_load
+except ImportError:  # if safetensors is unavailable, fall back to torch.load
+    safe_load = None
+
 BASE_DIR = os.path.dirname(os.path.abspath(__file__))


+# -------------------------------------------------------------------
+# shared: checkpoint (weight file) lookup helper
+# -------------------------------------------------------------------
+def _find_ckpt(root: str, keyword: str):
+    """Search under root for a .pth / .pt / .safetensors file whose name contains keyword."""
+    patterns = [f"**/*{keyword}*.pth", f"**/*{keyword}*.pt",
+                f"**/*{keyword}*.safetensors"]
+    files = []
+    for p in patterns:
+        files.extend(glob.glob(os.path.join(root, p), recursive=True))
+    return files[0] if files else None
+
+
+# -------------------------------------------------------------------
+# single image + speech → video tensor
+# -------------------------------------------------------------------
+def test(pipe, cfg, wav_enc, audio_pe, audio2bucket, image_encoder,
          width, height, batch):

+    # align batch dimensions
     for k, v in batch.items():
         if isinstance(v, torch.Tensor):
+            batch[k] = v.unsqueeze(0).to(pipe.device).float()

+    ref_img   = batch["ref_img"]
+    clip_img  = batch["clip_images"]
+    face_mask = batch["face_mask"]
+    image_embeds = image_encoder(clip_img).image_embeds

+    audio_feature = batch["audio_feature"]   # (1,80,T)
+    audio_len     = int(batch["audio_len"])
+    step          = max(1, int(cfg.step))    # at least 1

+    window = 16_000                          # 1-second window
+    audio_prompts, last_prompts = [], []

+    for i in range(0, audio_feature.shape[-1], window):
+        chunk = audio_feature[:, :, i:i+window]
+        hidden_layers = wav_enc.encoder(chunk, output_hidden_states=True).hidden_states
+        last_hidden   = wav_enc.encoder(chunk).last_hidden_state.unsqueeze(-2)
+        audio_prompts.append(torch.stack(hidden_layers, dim=2))
+        last_prompts.append(last_hidden)

+    if not audio_prompts:
+        raise ValueError("[ERROR] No speech recognised in the provided audio.")

+    audio_prompts = torch.cat(audio_prompts, dim=1)
+    last_prompts  = torch.cat(last_prompts, dim=1)

+    # padding rule
+    audio_prompts = torch.cat(
+        [torch.zeros_like(audio_prompts[:, :4]),
+         audio_prompts,
+         torch.zeros_like(audio_prompts[:, :6])], dim=1)

+    last_prompts = torch.cat(
+        [torch.zeros_like(last_prompts[:, :24]),
+         last_prompts,
+         torch.zeros_like(last_prompts[:, :26])], dim=1)
+
+    total_tokens = audio_prompts.shape[1]
+    num_chunks   = max(1, math.ceil(total_tokens / (2*step)))

+    ref_list, audio_list, uncond_list, buckets = [], [], [], []

+    for i in tqdm(range(num_chunks)):
         st = i * 2 * step
+        cond = audio_prompts[:, st: st+10]
+        if cond.shape[2] < 10:
+            pad  = torch.zeros_like(cond[:, :, :10-cond.shape[2]])
+            cond = torch.cat([cond, pad], dim=2)

+        bucket_clip = last_prompts[:, st: st+50]
+        if bucket_clip.shape[2] < 50:
+            pad         = torch.zeros_like(bucket_clip[:, :, :50-bucket_clip.shape[2]])
+            bucket_clip = torch.cat([bucket_clip, pad], dim=2)
+
+        motion = audio2bucket(bucket_clip, image_embeds) * 16 + 16
+
+        ref_list.append(ref_img[0])
+        audio_list.append(audio_pe(cond).squeeze(0))
+        uncond_list.append(audio_pe(torch.zeros_like(cond)).squeeze(0))
         buckets.append(motion[0])

+    video = pipe(
         ref_img, clip_img, face_mask,
+        audio_list, uncond_list, buckets,
         height=height, width=width,
+        num_frames=len(audio_list),
         decode_chunk_size=cfg.decode_chunk_size,
         motion_bucket_scale=cfg.motion_bucket_scale,
         fps=cfg.fps,
@@ -119,13 +132,12 @@ def test(pipe, cfg, wav_enc, audio_pe, audio2bucket, img_enc,
         i2i_noise_strength=cfg.i2i_noise_strength,
     ).frames

+    return (video * 0.5 + 0.5).clamp(0, 1).unsqueeze(0).cpu()


+# -------------------------------------------------------------------
+# Sonic ✨
+# -------------------------------------------------------------------
 class Sonic:
     config_file = os.path.join(BASE_DIR, "config/inference/sonic.yaml")
     config = OmegaConf.load(config_file)
@@ -135,92 +147,125 @@ class Sonic:
         cfg.use_interframe = enable_interpolate_frame
         self.device = f"cuda:{device_id}" if torch.cuda.is_available() and device_id >= 0 else "cpu"

+        # weights root
+        ckpt_root = os.path.join(BASE_DIR, "checkpoints", "Sonic")
+        cfg.pretrained_model_name_or_path = ckpt_root  # diffusers-format layout

+        self._load_models(cfg, ckpt_root)
+        print("Sonic init done")

+    # --------------------------------------------------------------
+    def _load_models(self, cfg, ckpt_root):
         dtype = {"fp16": torch.float16, "fp32": torch.float32, "bf16": torch.bfloat16}[cfg.weight_dtype]

+        # base diffusers weights
         vae   = AutoencoderKLTemporalDecoder.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="vae", variant="fp16")
         sched = EulerDiscreteScheduler.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="scheduler")
+        image_enc = CLIPVisionModelWithProjection.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="image_encoder", variant="fp16")
         unet  = UNetSpatioTemporalConditionModel.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="unet", variant="fp16")
         add_ip_adapters(unet, [32], [cfg.ip_audio_scale])

+        # ------------ extra checkpoints (.pth / .safetensors) ------------
+        def _try_load(module, keyword):
+            path = _find_ckpt(ckpt_root, keyword)
+            if not path:
+                print(f"[WARN] {keyword} checkpoint not found → skip")
+                return
+            print(f"[INFO] load {keyword} ckpt → {os.path.relpath(path, BASE_DIR)}")
+            if path.endswith(".safetensors") and safe_load is not None:
+                state = safe_load(path, device="cpu")
+            else:
+                state = torch.load(path, map_location="cpu")
+            module.load_state_dict(state, strict=False)
+
+        _try_load(unet, "unet")
+        # audio adapters (required)
+        a2t = AudioProjModel(10, 5, 384, 1024, 1024, 32).to(self.device)
+        a2b = Audio2bucketModel(50, 1, 384, 1024, 1024, 1, 2).to(self.device)
+        _try_load(a2t, "audio2token")
+        _try_load(a2b, "audio2bucket")
+
+        # whisper tiny
+        whisper = WhisperModel.from_pretrained(
+            os.path.join(BASE_DIR, "checkpoints/whisper-tiny")
+        ).to(self.device).eval()
+        whisper.requires_grad_(False)
+
+        self.feature_extractor = AutoFeatureExtractor.from_pretrained(
+            os.path.join(BASE_DIR, "checkpoints/whisper-tiny")
+        )
         self.face_det = AlignImage(self.device, det_path=os.path.join(BASE_DIR, "checkpoints/yoloface_v5m.pt"))
         if cfg.use_interframe:
+            self.rife = RIFEModel(device=self.device)
+            self.rife.load_model(os.path.join(BASE_DIR, "checkpoints/RIFE/"))
+
+        for m in (image_enc, vae, unet):
+            m.to(dtype)

+        self.pipe = SonicPipeline(unet=unet, image_encoder=image_enc, vae=vae, scheduler=sched).to(device=self.device, dtype=dtype)
+        self.image_encoder = image_enc
+        self.audio2token   = a2t
+        self.audio2bucket  = a2b
+        self.whisper       = whisper

+    # --------------------------------------------------------------
+    def preprocess(self, image_path: str, expand_ratio: float = 1.0):
+        img = cv2.imread(image_path)
+        h, w = img.shape[:2]
+        _, _, bboxes = self.face_det(img, maxface=True)
+        if bboxes:
+            x1, y1, ww, hh = bboxes[0]
+            return {"face_num": 1,
+                    "crop_bbox": process_bbox((x1, y1, x1+ww, y1+hh), expand_ratio, h, w)}
+        return {"face_num": 0, "crop_bbox": None}

+    # --------------------------------------------------------------
     @torch.no_grad()
+    def process(self, image_path, audio_path, output_path,
+                min_resolution=512, inference_steps=25, dynamic_scale=1.0,
+                keep_resolution=False, seed=None):

         cfg = self.config
+        if seed is not None:
+            cfg.seed = seed
         cfg.num_inference_steps = inference_steps
         cfg.motion_bucket_scale = dynamic_scale
         seed_everything(cfg.seed)

+        # image / audio → tensors
+        data = image_audio_to_tensor(
             self.face_det, self.feature_extractor,
+            image_path, audio_path,
+            limit=-1, image_size=min_resolution, area=cfg.area
         )
+        if data is None:
+            return -1

+        h, w = data["ref_img"].shape[-2:]
+        if keep_resolution:
+            im = Image.open(image_path)
+            resolution = f"{im.width//2*2}x{im.height//2*2}"
+        else:
+            resolution = f"{w}x{h}"

         video = test(self.pipe, cfg, self.whisper, self.audio2token,
+                     self.audio2bucket, self.image_encoder,
+                     w, h, data)

+        # inter-frame interpolation (RIFE)
+        if cfg.use_interframe:
+            out, frames = video.to(self.device), []
             for i in tqdm(range(out.shape[2]-1), ncols=0):
+                mid = self.rife.inference(out[:,:,i], out[:,:,i+1]).clamp(0,1).detach()
+                frames.extend([out[:,:,i], mid])
+            frames.append(out[:,:,-1])
+            video = torch.stack(frames, 2).cpu()
+
+        tmp = output_path.replace(".mp4", "_noaudio.mp4")
+        save_videos_grid(video, tmp, n_rows=video.shape[0],
+                         fps=cfg.fps*(2 if cfg.use_interframe else 1))
+        os.system(
+            f"ffmpeg -loglevel error -y -i '{tmp}' -i '{audio_path}' -s {resolution} "
+            f"-vcodec libx264 -acodec aac -crf 18 -shortest '{output_path}'")
+        os.remove(tmp)
+        return 0
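
For reference, a minimal usage sketch of the interface after this commit (not part of the commit itself; the example file paths are placeholders, and the device_id / enable_interpolate_frame keywords are assumed from the __init__ body shown above):

    from sonic import Sonic

    pipeline = Sonic(device_id=0, enable_interpolate_frame=True)

    # hypothetical input paths; preprocess() reports whether a face was detected
    face_info = pipeline.preprocess("examples/face.png", expand_ratio=1.0)
    if face_info["face_num"] > 0:
        # renders the talking-head video and muxes the driving audio in via ffmpeg
        pipeline.process("examples/face.png", "examples/speech.wav", "result.mp4",
                         min_resolution=512, inference_steps=25, dynamic_scale=1.0)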