openfree committed on
Commit 2399e79 · verified · 1 Parent(s): 9132603

Update app.py

Files changed (1)
  1. app.py +176 -121
app.py CHANGED
@@ -1,138 +1,193 @@
- # app.py
- import os, io, hashlib, spaces, gradio as gr
- from pydub import AudioSegment
- from PIL import Image
  import numpy as np
- from sonic import Sonic   # <-- uses the modified sonic.py
-
- # ------------------------------------------------------------------
- # 1. Install the required models/libraries & download the checkpoints
- # ------------------------------------------------------------------
-
- SETUP_CMD = (
-     'python -m pip install "huggingface_hub[cli]" accelerate; '
-     'huggingface-cli download LeonJoe13/Sonic '
-     ' --local-dir checkpoints/Sonic --local-dir-use-symlinks False; '
-     'huggingface-cli download stabilityai/stable-video-diffusion-img2vid-xt '
-     ' --local-dir checkpoints/stable-video-diffusion-img2vid-xt --local-dir-use-symlinks False; '
-     'huggingface-cli download openai/whisper-tiny '
-     ' --local-dir checkpoints/whisper-tiny --local-dir-use-symlinks False; '
  )
- os.system(SETUP_CMD)
-
- # ------------------------------------------------------------------
- # 2. Initialize the pipeline (GPU, once only)
- # ------------------------------------------------------------------
- pipe = Sonic()   # initializes cleanly now that the error is gone
-
- # ------------------------------------------------------------------
- # 3. Utilities
- # ------------------------------------------------------------------
- def _md5(b: bytes) -> str:
-     return hashlib.md5(b).hexdigest()

- TMP_DIR = "tmp_path"
- RES_DIR = "res_path"
- os.makedirs(TMP_DIR, exist_ok=True)
- os.makedirs(RES_DIR, exist_ok=True)

- # ------------------------------------------------------------------
- # 4. Actual video generation (GPU task)
- # ------------------------------------------------------------------
- @spaces.GPU(duration=600)   # up to 10 minutes
- def _render_video(img_path: str,
-                   audio_path: str,
-                   out_path: str,
-                   dynamic_scale: float = 1.0) -> str | int:

      min_resolution = 512
-     audio = AudioSegment.from_file(audio_path)
-     duration_sec = len(audio) / 1000.0
-     steps = int(np.clip(duration_sec * 12.5, 25, 750))
-
-     print(f"[INFO] Audio duration={duration_sec:.2f}s inference_steps={steps}")
-
-     face_info = pipe.preprocess(img_path)
-     print(f"[INFO] Face detection info: {face_info}")
-
-     if face_info["face_num"] == 0:
-         return -1   # no face detected
-
-     os.makedirs(os.path.dirname(out_path), exist_ok=True)
-     pipe.process(
-         img_path, audio_path, out_path,
-         min_resolution=min_resolution,
-         inference_steps=steps,
-         dynamic_scale=dynamic_scale,
-     )
-     return out_path
-
- # ------------------------------------------------------------------
- # 5. Gradio wrapper
- # ------------------------------------------------------------------
- def run_sonic(image, audio, dynamic_scale):
-
      if image is None:
-         raise gr.Error("Please upload an image.")
      if audio is None:
-         raise gr.Error("Please upload an audio file.")
-
-     # ── Image cache ──────────────────────────────────────────────
-     buf_i = io.BytesIO(); image.save(buf_i, format="PNG")
-     img_hash = _md5(buf_i.getvalue())
-     img_path = os.path.join(TMP_DIR, f"{img_hash}.png")
-     if not os.path.exists(img_path):
-         with open(img_path, "wb") as f: f.write(buf_i.getvalue())
-
-     # ── Audio cache (mono/16 kHz, ≤60 s) ─────────────────────────
-     rate, arr = audio[:2]
-     if arr.ndim == 1: arr = arr[:, None]
-     seg = AudioSegment(arr.tobytes(), frame_rate=rate,
-                        sample_width=arr.dtype.itemsize, channels=arr.shape[1])
-     seg = seg.set_channels(1).set_frame_rate(16000)[:60_000]
-     buf_a = io.BytesIO(); seg.export(buf_a, format="wav")
-     aud_hash = _md5(buf_a.getvalue())
-     aud_path = os.path.join(TMP_DIR, f"{aud_hash}.wav")
-     if not os.path.exists(aud_path):
-         with open(aud_path, "wb") as f: f.write(buf_a.getvalue())
-
-     # ── Output path ──────────────────────────────────────────────
-     out_path = os.path.join(
-         RES_DIR, f"{img_hash}_{aud_hash}_{dynamic_scale:.1f}.mp4"
      )
-
-     if os.path.exists(out_path):
-         print(f"[INFO] Cache hit → {out_path}")
-         return out_path
-
-     print(f"[INFO] Generating video (dynamic_scale={dynamic_scale}) …")
-     return _render_video(img_path, aud_path, out_path, dynamic_scale)
-
- # ------------------------------------------------------------------
- # 6. Gradio UI
- # ------------------------------------------------------------------
- CSS = """
- .gradio-container{font-family:Arial, sans-serif}
- .main-header{text-align:center;color:#2a2a2a;margin-bottom:2em}
  """

- with gr.Blocks(css=CSS) as demo:
      gr.HTML("""
-     <div class="main-header">
-         <h1>🎭 Sonic - Portrait Animation</h1>
-         <p>Turn a single photo into a talking-head video (≤1 min audio)</p>
-     </div>""")
-
      with gr.Row():
          with gr.Column():
-             img_in = gr.Image(type="pil", label="Portrait Image")
-             aud_in = gr.Audio(label="Voice / Audio (≤60 s)", type="numpy")
-             scale = gr.Slider(0.5, 2.0, 1.0, step=0.1,
-                               label="Animation Intensity")
-             btn = gr.Button("Generate Animation", variant="primary")
          with gr.Column():
-             vid_out = gr.Video(label="Result")

-     btn.click(run_sonic, [img_in, aud_in, scale], vid_out)

- demo.launch(share=True)
 
 
+ import spaces
+ import gradio as gr
+ import os
  import numpy as np
+ from pydub import AudioSegment
+ import hashlib
+ from sonic import Sonic
+ from PIL import Image
+ import torch
+
+ # Model initialization
+ cmd = (
+     'python3 -m pip install "huggingface_hub[cli]"; '
+     'huggingface-cli download LeonJoe13/Sonic --local-dir checkpoints; '
+     'huggingface-cli download stabilityai/stable-video-diffusion-img2vid-xt --local-dir checkpoints/stable-video-diffusion-img2vid-xt; '
+     'huggingface-cli download openai/whisper-tiny --local-dir checkpoints/whisper-tiny;'
  )
+ os.system(cmd)
+
+ pipe = Sonic()
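+ # NOTE: os.system(cmd) above runs at import time, so the three checkpoint
+ # downloads must complete before Sonic() can load its weights from ./checkpoints.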
 
 
 
+ def get_md5(content):
+     md5hash = hashlib.md5(content)
+     return md5hash.hexdigest()
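+ # The md5 digests serve as content-addressed cache keys below; hashlib.md5
+ # accepts any bytes-like object, including a C-contiguous numpy array.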
 
 
 
 
 
+ @spaces.GPU(duration=300)  # duration set to 300 s to handle longer video jobs
+ def get_video_res(img_path, audio_path, res_video_path, dynamic_scale=1.0):
+     expand_ratio = 0.5
      min_resolution = 512
+     inference_steps = 25  # fixed at 25 steps, i.e. a 2-second video (25 frames)
+
+     # Print the audio duration (informational only)
+     audio = AudioSegment.from_file(audio_path)
+     duration = len(audio) / 1000.0  # in seconds (pydub lengths are in ms)
+     print(f"Audio duration: {duration} seconds, using inference_steps: {inference_steps}")
+
+     face_info = pipe.preprocess(img_path, expand_ratio=expand_ratio)
+     print(f"Face detection info: {face_info}")
+
+     if face_info['face_num'] > 0:
+         crop_image_path = img_path + '.crop.png'
+         pipe.crop_image(img_path, crop_image_path, face_info['crop_bbox'])
+         img_path = crop_image_path
+         os.makedirs(os.path.dirname(res_video_path), exist_ok=True)
+
+         # Generate the video with the fixed inference_steps (25)
+         pipe.process(
+             img_path,
+             audio_path,
+             res_video_path,
+             min_resolution=min_resolution,
+             inference_steps=inference_steps,
+             dynamic_scale=dynamic_scale
+         )
+         return res_video_path
+     else:
+         return -1
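+ # -1 is the "no face detected" sentinel; process_sonic returns it to Gradio
+ # unchanged, so a faceless portrait currently surfaces as an invalid video value.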
+
+ tmp_path = './tmp_path/'
+ res_path = './res_path/'
+ os.makedirs(tmp_path, exist_ok=True)
+ os.makedirs(res_path, exist_ok=True)
+
+ def process_sonic(image, audio, dynamic_scale):
+     # Input validation
      if image is None:
+         raise gr.Error("Please upload an image")
      if audio is None:
+         raise gr.Error("Please upload an audio file")
+
+     img_md5 = get_md5(np.array(image))
+     audio_md5 = get_md5(audio[1])
+     print(f"Processing with image hash: {img_md5}, audio hash: {audio_md5}")
+
+     sampling_rate, arr = audio[:2]
+     if len(arr.shape) == 1:
+         arr = arr[:, None]
+
+     # Build an AudioSegment from the numpy array
+     audio_segment = AudioSegment(
+         arr.tobytes(),
+         frame_rate=sampling_rate,
+         sample_width=arr.dtype.itemsize,
+         channels=arr.shape[1]
      )
+     audio_segment = audio_segment.set_frame_rate(sampling_rate)
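+     # (a no-op as written, since the segment was created with frame_rate=sampling_rate;
+     # this is where resampling, e.g. to 16 kHz, would go if needed)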
+
+     # Build the file paths
+     image_path = os.path.abspath(os.path.join(tmp_path, f'{img_md5}.png'))
+     audio_path = os.path.abspath(os.path.join(tmp_path, f'{audio_md5}.wav'))
+     res_video_path = os.path.abspath(os.path.join(res_path, f'{img_md5}_{audio_md5}_{dynamic_scale}.mp4'))
+
+     # Save the inputs if they are not already on disk
+     if not os.path.exists(image_path):
+         image.save(image_path)
+     if not os.path.exists(audio_path):
+         audio_segment.export(audio_path, format="wav")
+
+     # Return the cached result if it exists; otherwise generate a new one
+     if os.path.exists(res_video_path):
+         print(f"Using cached result: {res_video_path}")
+         return res_video_path
+     else:
+         print(f"Generating new video with dynamic scale: {dynamic_scale}")
+         return get_video_res(image_path, audio_path, res_video_path, dynamic_scale)
+
+ # Dummy function for example data (add real examples here if needed)
+ def get_example():
+     return []
+
+ css = """
+ .gradio-container {
+     font-family: 'Arial', sans-serif;
+ }
+ .main-header {
+     text-align: center;
+     color: #2a2a2a;
+     margin-bottom: 2em;
+ }
+ .parameter-section {
+     background-color: #f5f5f5;
+     padding: 1em;
+     border-radius: 8px;
+     margin: 1em 0;
+ }
+ .example-section {
+     margin-top: 2em;
+ }
  """

+ with gr.Blocks(css=css) as demo:
      gr.HTML("""
+     <div class="main-header">
+         <h1>🎭 Sonic: Advanced Portrait Animation</h1>
+         <p>Transform still images into dynamic videos synchronized with audio</p>
+     </div>
+     """)
+
      with gr.Row():
          with gr.Column():
+             image_input = gr.Image(
+                 type='pil',
+                 label="Portrait Image",
+                 elem_id="image_input"
+             )
+
+             audio_input = gr.Audio(
+                 label="Voice/Audio Input",
+                 elem_id="audio_input",
+                 type="numpy"
+             )
+
+             with gr.Column():
+                 dynamic_scale = gr.Slider(
+                     minimum=0.5,
+                     maximum=2.0,
+                     value=1.0,
+                     step=0.1,
+                     label="Animation Intensity",
+                     info="Adjust to control movement intensity (0.5: subtle, 2.0: dramatic)"
+                 )
+
+                 process_btn = gr.Button(
+                     "Generate Animation",
+                     variant="primary",
+                     elem_id="process_btn"
+                 )
+
          with gr.Column():
+             video_output = gr.Video(
+                 label="Generated Animation",
+                 elem_id="video_output"
+             )
+
+     process_btn.click(
+         fn=process_sonic,
+         inputs=[image_input, audio_input, dynamic_scale],
+         outputs=video_output,
+         api_name="animate"
+     )
+
+     gr.Examples(
+         examples=get_example(),
+         fn=process_sonic,
+         inputs=[image_input, audio_input, dynamic_scale],
+         outputs=video_output,
+         cache_examples=False
+     )
+
+ # Create a public link: share=True
+ demo.launch(share=True)
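
Because the click handler is now registered with api_name="animate", the updated Space can also be driven programmatically. A minimal client-side sketch using gradio_client; the Space id and file names here are placeholders, not part of this commit:

from gradio_client import Client, handle_file

# Placeholder Space id; substitute the actual <user>/<space> running this app.py.
client = Client("user/sonic-portrait-animation")
result = client.predict(
    handle_file("portrait.png"),   # image_input (Portrait Image)
    handle_file("voice.wav"),      # audio_input (Voice/Audio Input)
    1.0,                           # dynamic_scale (Animation Intensity)
    api_name="/animate",
)
print(result)  # local path to the generated .mp4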