# sonic.py  (full file)
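"""Sonic inference: animate a single face image with a speech track.

Rough flow of this file:
  1. Whisper-tiny encodes the audio into windowed hidden states.
  2. AudioProjModel / Audio2bucketModel turn those states into audio tokens
     and motion-bucket values.
  3. SonicPipeline renders the video frames; RIFE optionally doubles the
     frame rate; ffmpeg muxes the original audio back into the output .mp4.
"""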

import os, math, glob, torch, cv2
from PIL import Image
from omegaconf import OmegaConf
from tqdm import tqdm

from diffusers import AutoencoderKLTemporalDecoder
from diffusers.schedulers import EulerDiscreteScheduler
from transformers import WhisperModel, CLIPVisionModelWithProjection, AutoFeatureExtractor

from src.utils.util import save_videos_grid, seed_everything
from src.dataset.test_preprocess import process_bbox, image_audio_to_tensor
from src.models.base.unet_spatio_temporal_condition import (
    UNetSpatioTemporalConditionModel, add_ip_adapters,
)
from src.pipelines.pipeline_sonic import SonicPipeline
from src.models.audio_adapter.audio_proj import AudioProjModel
from src.models.audio_adapter.audio_to_bucket import Audio2bucketModel
from src.utils.RIFE.RIFE_HDv3 import RIFEModel
from src.dataset.face_align.align import AlignImage

try:
    from safetensors.torch import load_file as safe_load
except ImportError:       # if safetensors is unavailable, fall back to torch.load only
    safe_load = None

BASE_DIR = os.path.dirname(os.path.abspath(__file__))


# -------------------------------------------------------------------
#            shared: checkpoint (weight file) lookup helper
# -------------------------------------------------------------------
def _find_ckpt(root: str, keyword: str):
    """root 아래에서 keyword 가 포함된 .pth / .pt / .safetensors 파일 검색"""
    patterns = [f"**/*{keyword}*.pth", f"**/*{keyword}*.pt",
                f"**/*{keyword}*.safetensors"]
    files = []
    for p in patterns:
        files.extend(glob.glob(os.path.join(root, p), recursive=True))
    return files[0] if files else None
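
# Example (hypothetical layout): _find_ckpt("checkpoints/Sonic", "audio2token")
# would return the first match, e.g. "checkpoints/Sonic/audio2token.pth",
# or None when nothing under the root matches the keyword.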


# -------------------------------------------------------------------
#            single image + speech  →  video tensor
# -------------------------------------------------------------------
def test(pipe, cfg, wav_enc, audio_pe, audio2bucket, image_encoder,
         width, height, batch):

    # add a batch dimension and move tensors to the pipeline device
    for k, v in batch.items():
        if isinstance(v, torch.Tensor):
            batch[k] = v.unsqueeze(0).to(pipe.device).float()

    ref_img   = batch["ref_img"]
    clip_img  = batch["clip_images"]
    face_mask = batch["face_mask"]
    image_embeds = image_encoder(clip_img).image_embeds

    audio_feature = batch["audio_feature"]          # (1, 80, T)
    audio_len     = int(batch["audio_len"])
    step          = max(1, int(cfg.step))           # at least 1

    window = 16_000                                 # 1-second chunks at 16 kHz
    audio_prompts, last_prompts = [], []

    for i in range(0, audio_feature.shape[-1], window):
        chunk = audio_feature[:, :, i:i+window]
        # single encoder pass per chunk; reuse it for both outputs
        enc_out = wav_enc.encoder(chunk, output_hidden_states=True)
        audio_prompts.append(torch.stack(enc_out.hidden_states, dim=2))
        last_prompts.append(enc_out.last_hidden_state.unsqueeze(-2))

    if not audio_prompts:
        raise ValueError("[ERROR] No speech recognised in the provided audio.")

    audio_prompts = torch.cat(audio_prompts, dim=1)
    last_prompts  = torch.cat(last_prompts , dim=1)

    # padding rule: zero-pad the prompt sequences at both ends
    audio_prompts = torch.cat(
        [torch.zeros_like(audio_prompts[:, :4]),
         audio_prompts,
         torch.zeros_like(audio_prompts[:, :6])], dim=1)

    last_prompts = torch.cat(
        [torch.zeros_like(last_prompts[:, :24]),
         last_prompts,
         torch.zeros_like(last_prompts[:, :26])], dim=1)

    total_tokens = audio_prompts.shape[1]
    num_chunks   = max(1, math.ceil(total_tokens / (2*step)))

    ref_list, audio_list, uncond_list, buckets = [], [], [], []

    for i in tqdm(range(num_chunks)):
        st = i * 2 * step
        # the audio-token projector expects a window of exactly 10 time steps
        cond = audio_prompts[:, st: st+10]
        if cond.shape[1] < 10:
            pad  = torch.zeros_like(audio_prompts[:, :10-cond.shape[1]])
            cond = torch.cat([cond, pad], dim=1)

        # the bucket predictor expects a window of exactly 50 time steps
        bucket_clip = last_prompts[:, st: st+50]
        if bucket_clip.shape[1] < 50:
            pad         = torch.zeros_like(last_prompts[:, :50-bucket_clip.shape[1]])
            bucket_clip = torch.cat([bucket_clip, pad], dim=1)

        motion = audio2bucket(bucket_clip, image_embeds) * 16 + 16

        ref_list.append(ref_img[0])
        audio_list.append(audio_pe(cond).squeeze(0))
        uncond_list.append(audio_pe(torch.zeros_like(cond)).squeeze(0))
        buckets.append(motion[0])

    video = pipe(
        ref_img, clip_img, face_mask,
        audio_list, uncond_list, buckets,
        height=height, width=width,
        num_frames=len(audio_list),
        decode_chunk_size=cfg.decode_chunk_size,
        motion_bucket_scale=cfg.motion_bucket_scale,
        fps=cfg.fps,
        noise_aug_strength=cfg.noise_aug_strength,
        min_guidance_scale1=cfg.min_appearance_guidance_scale,
        max_guidance_scale1=cfg.max_appearance_guidance_scale,
        min_guidance_scale2=cfg.audio_guidance_scale,
        max_guidance_scale2=cfg.audio_guidance_scale,
        overlap=cfg.overlap,
        shift_offset=cfg.shift_offset,
        frames_per_batch=cfg.n_sample_frames,
        num_inference_steps=cfg.num_inference_steps,
        i2i_noise_strength=cfg.i2i_noise_strength,
    ).frames

    return (video * 0.5 + 0.5).clamp(0, 1).unsqueeze(0).cpu()


# -------------------------------------------------------------------
#                          Sonic  ✨
# -------------------------------------------------------------------
class Sonic:
    config_file = os.path.join(BASE_DIR, "config/inference/sonic.yaml")
    config      = OmegaConf.load(config_file)

    def __init__(self, device_id: int = 0, enable_interpolate_frame: bool = True):
        cfg                = self.config
        cfg.use_interframe = enable_interpolate_frame
        self.device        = f"cuda:{device_id}" if torch.cuda.is_available() and device_id >= 0 else "cpu"

        # weights root (diffusers-style layout)
        ckpt_root = os.path.join(BASE_DIR, "checkpoints", "Sonic")
        cfg.pretrained_model_name_or_path = ckpt_root

        self._load_models(cfg, ckpt_root)
        print("Sonic init done")

    # --------------------------------------------------------------
    def _load_models(self, cfg, ckpt_root):
        dtype = {"fp16": torch.float16, "fp32": torch.float32, "bf16": torch.bfloat16}[cfg.weight_dtype]

        # diffusers base weights
        vae = AutoencoderKLTemporalDecoder.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="vae", variant="fp16")
        sched = EulerDiscreteScheduler.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="scheduler")
        image_enc = CLIPVisionModelWithProjection.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="image_encoder", variant="fp16")
        unet = UNetSpatioTemporalConditionModel.from_pretrained(cfg.pretrained_model_name_or_path, subfolder="unet", variant="fp16")
        add_ip_adapters(unet, [32], [cfg.ip_audio_scale])

        # ------------ extra checkpoints (.pth / .safetensors) ------------
        def _try_load(module, keyword):
            path = _find_ckpt(ckpt_root, keyword)
            if not path:
                print(f"[WARN] {keyword} checkpoint not found → skip")
                return
            print(f"[INFO] load {keyword} ckpt → {os.path.relpath(path, BASE_DIR)}")
            if path.endswith(".safetensors") and safe_load is not None:
                state = safe_load(path, device="cpu")
            else:
                state = torch.load(path, map_location="cpu")
            module.load_state_dict(state, strict=False)

        _try_load(unet,      "unet")
        # audio adapters (required)
        a2t = AudioProjModel(10, 5, 384, 1024, 1024, 32).to(self.device)
        a2b = Audio2bucketModel(50, 1, 384, 1024, 1024, 1, 2).to(self.device)
        _try_load(a2t, "audio2token")
        _try_load(a2b, "audio2bucket")

        # whisper tiny
        whisper = WhisperModel.from_pretrained(
            os.path.join(BASE_DIR, "checkpoints/whisper-tiny")
        ).to(self.device).eval()
        whisper.requires_grad_(False)

        self.feature_extractor = AutoFeatureExtractor.from_pretrained(
            os.path.join(BASE_DIR, "checkpoints/whisper-tiny")
        )
        self.face_det = AlignImage(self.device, det_path=os.path.join(BASE_DIR, "checkpoints/yoloface_v5m.pt"))
        if cfg.use_interframe:
            self.rife = RIFEModel(device=self.device)
            self.rife.load_model(os.path.join(BASE_DIR, "checkpoints/RIFE/"))

        for m in (image_enc, vae, unet):
            m.to(dtype)

        self.pipe          = SonicPipeline(unet=unet, image_encoder=image_enc, vae=vae, scheduler=sched).to(device=self.device, dtype=dtype)
        self.image_encoder = image_enc
        self.audio2token   = a2t
        self.audio2bucket  = a2b
        self.whisper       = whisper

    # --------------------------------------------------------------
    def preprocess(self, image_path: str, expand_ratio: float = 1.0):
        img = cv2.imread(image_path)
        if img is None:
            raise FileNotFoundError(f"Cannot read image: {image_path}")
        h, w = img.shape[:2]
        _, _, bboxes = self.face_det(img, maxface=True)
        if bboxes:
            x1, y1, ww, hh = bboxes[0]
            return {"face_num": 1,
                    "crop_bbox": process_bbox((x1, y1, x1+ww, y1+hh), expand_ratio, h, w)}
        return {"face_num": 0, "crop_bbox": None}

    # --------------------------------------------------------------
    @torch.no_grad()
    def process(self, image_path, audio_path, output_path,
                min_resolution=512, inference_steps=25, dynamic_scale=1.0,
                keep_resolution=False, seed=None):

        cfg = self.config
        if seed is not None:
            cfg.seed = seed
        cfg.num_inference_steps = inference_steps
        cfg.motion_bucket_scale = dynamic_scale
        seed_everything(cfg.seed)

        # image + audio → tensors
        data = image_audio_to_tensor(
            self.face_det, self.feature_extractor,
            image_path, audio_path,
            limit=-1, image_size=min_resolution, area=cfg.area
        )
        if data is None:
            return -1

        h, w = data["ref_img"].shape[-2:]
        if keep_resolution:
            im = Image.open(image_path)
            resolution = f"{im.width//2*2}x{im.height//2*2}"
        else:
            resolution = f"{w}x{h}"

        video = test(self.pipe, cfg, self.whisper, self.audio2token,
                     self.audio2bucket, self.image_encoder,
                     w, h, data)

        # inter-frame interpolation: insert a RIFE frame between neighbouring frames
        if cfg.use_interframe:
            out, frames = video.to(self.device), []
            for i in tqdm(range(out.shape[2]-1), ncols=0):
                mid = self.rife.inference(out[:,:,i], out[:,:,i+1]).clamp(0,1).detach()
                frames.extend([out[:,:,i], mid])
            frames.append(out[:,:,-1])
            video = torch.stack(frames, 2).cpu()

        tmp = output_path.replace(".mp4", "_noaudio.mp4")
        save_videos_grid(video, tmp, n_rows=video.shape[0],
                         fps=cfg.fps*(2 if cfg.use_interframe else 1))
        os.system(
            f"ffmpeg -loglevel error -y -i '{tmp}' -i '{audio_path}' -s {resolution} "
            f"-vcodec libx264 -acodec aac -crf 18 -shortest '{output_path}'")
        os.remove(tmp)
        return 0
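

# ---------------------------------------------------------------------
# Minimal usage sketch (paths are placeholders, not files shipped here):
#
#   sonic = Sonic(device_id=0)
#   info  = sonic.preprocess("examples/face.png")      # optional face check
#   if info["face_num"] > 0:
#       sonic.process("examples/face.png", "examples/speech.wav",
#                     "outputs/result.mp4", inference_steps=25)
# ---------------------------------------------------------------------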