fantaxy committed
Commit 07e35a2 · verified · 1 Parent(s): 1f11e93

Update app.py

Files changed (1)
  1. app.py +58 -47
app.py CHANGED
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import spaces
 import gradio as gr
 import os
@@ -46,56 +47,67 @@ model = model.cuda()
 # Load the translation model
 translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 
-@spaces.GPU(duration=300, gpu_type="h100")
+@spaces.GPU(duration=300, gpu_type="l40s")
 def infer(image, prompt, steps=50, cfg_scale=7.5, eta=1.0, fs=3, seed=123):
-    # Check for Korean input and translate it
-    if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
-        translated = translator(prompt, max_length=512)
-        prompt = translated[0]['translation_text']
-
-    resolution = (576, 1024)
-    save_fps = 8
-    seed_everything(seed)
-    transform = transforms.Compose([
-        transforms.Resize(min(resolution)),
-        transforms.CenterCrop(resolution),
+    try:
+        # Check for Korean input and translate it
+        if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
+            translated = translator(prompt, max_length=512)
+            prompt = translated[0]['translation_text']
+
+        resolution = (576, 1024)
+        save_fps = 8
+        seed_everything(seed)
+        transform = transforms.Compose([
+            transforms.Resize(min(resolution), antialias=True),
+            transforms.CenterCrop(resolution),
     ])
-    torch.cuda.empty_cache()
-    print('start:', prompt, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
-    start = time.time()
-    if steps > 60:
-        steps = 60
+
+        print('start:', prompt, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
+        start = time.time()
+        if steps > 60:
+            steps = 60
 
-    batch_size = 1
-    channels = model.model.diffusion_model.out_channels
-    frames = model.temporal_length
-    h, w = resolution[0] // 8, resolution[1] // 8
-    noise_shape = [batch_size, channels, frames, h, w]
+        batch_size = 1
+        channels = model.model.diffusion_model.out_channels
+        frames = model.temporal_length
+        h, w = resolution[0] // 8, resolution[1] // 8
+        noise_shape = [batch_size, channels, frames, h, w]
 
-    with torch.no_grad(), torch.cuda.amp.autocast():
-        text_emb = model.get_learned_conditioning([prompt])
-
-        img_tensor = torch.from_numpy(image).permute(2, 0, 1).float().to(model.device)
-        img_tensor = (img_tensor / 255. - 0.5) * 2
-        image_tensor_resized = transform(img_tensor)
-        videos = image_tensor_resized.unsqueeze(0)
-
-        z = get_latent_z(model, videos.unsqueeze(2))
-        img_tensor_repeat = repeat(z, 'b c t h w -> b c (repeat t) h w', repeat=frames)
-
-        cond_images = model.embedder(img_tensor.unsqueeze(0))
-        img_emb = model.image_proj_model(cond_images)
-
-        imtext_cond = torch.cat([text_emb, img_emb], dim=1)
-
-        fs = torch.tensor([fs], dtype=torch.long, device=model.device)
-        cond = {"c_crossattn": [imtext_cond], "fs": fs, "c_concat": [img_tensor_repeat]}
-
-        batch_samples = batch_ddim_sampling(model, cond, noise_shape, n_samples=1, ddim_steps=steps, ddim_eta=eta, cfg_scale=cfg_scale)
-
-    video_path = './output.mp4'
-    save_videos(batch_samples, './', filenames=['output'], fps=save_fps)
-    return video_path
+        with torch.no_grad(), torch.cuda.amp.autocast():
+            text_emb = model.get_learned_conditioning([prompt])
+
+            img_tensor = torch.from_numpy(image).permute(2, 0, 1).float().to(model.device)
+            img_tensor = (img_tensor / 255. - 0.5) * 2
+            image_tensor_resized = transform(img_tensor)
+            videos = image_tensor_resized.unsqueeze(0)
+
+            z = get_latent_z(model, videos.unsqueeze(2))
+            img_tensor_repeat = repeat(z, 'b c t h w -> b c (repeat t) h w', repeat=frames)
+
+            cond_images = model.embedder(img_tensor.unsqueeze(0))
+            img_emb = model.image_proj_model(cond_images)
+
+            imtext_cond = torch.cat([text_emb, img_emb], dim=1)
+
+            fs = torch.tensor([fs], dtype=torch.long, device=model.device)
+            cond = {"c_crossattn": [imtext_cond], "fs": fs, "c_concat": [img_tensor_repeat]}
+
+            batch_samples = batch_ddim_sampling(model, cond, noise_shape, n_samples=1, ddim_steps=steps, ddim_eta=eta, cfg_scale=cfg_scale)
+
+        video_path = './output.mp4'
+        save_videos(batch_samples, './', filenames=['output'], fps=save_fps)
+
+        # Clean up memory
+        del text_emb, img_tensor, image_tensor_resized, videos, z, img_tensor_repeat, cond_images, img_emb, imtext_cond, cond, batch_samples
+        torch.cuda.empty_cache()
+
+        return video_path
+    except Exception as e:
+        print(f"Error occurred: {e}")
+        return None
+    finally:
+        torch.cuda.empty_cache()
 
 i2v_examples = [
     ['prompts/1024/astronaut04.png', '우주인 복장으로 기타를 치는 남자', 30, 7.5, 1.0, 6, 123],
@@ -113,7 +125,7 @@ with gr.Blocks(analytics_enabled=False, css=css) as dynamicrafter_iface:
         with gr.Row():
             i2v_input_image = gr.Image(label="Input Image",elem_id="input_img")
         with gr.Row():
-            i2v_input_text = gr.Text(label='Prompts (한글 입력 가능)')
+            i2v_input_text = gr.Textbox(label='Prompts (한글 입력 가능)')
        with gr.Row():
             i2v_seed = gr.Slider(label='Random Seed', minimum=0, maximum=10000, step=1, value=123)
             i2v_eta = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label='ETA', value=1.0, elem_id="i2v_eta")
@@ -129,12 +141,11 @@ with gr.Blocks(analytics_enabled=False, css=css) as dynamicrafter_iface:
             inputs=[i2v_input_image, i2v_input_text, i2v_steps, i2v_cfg_scale, i2v_eta, i2v_motion, i2v_seed],
             outputs=[i2v_output_video],
             fn = infer,
-            cache_examples=True,
+            cache_examples=False  # set this to False to disable caching
         )
         i2v_end_btn.click(inputs=[i2v_input_image, i2v_input_text, i2v_steps, i2v_cfg_scale, i2v_eta, i2v_motion, i2v_seed],
                           outputs=[i2v_output_video],
                           fn = infer
         )
 
-dynamicrafter_iface.launch()
-#dynamicrafter_iface.launch(server_port=7890, server_name="0.0.0.0", share=True)
+dynamicrafter_iface.launch(server_port=7890, server_name="0.0.0.0", share=True)
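For reference, the behavioral core of the updated infer() is the Hangul check followed by Korean-to-English translation of the prompt. Below is a minimal standalone sketch of that step, assuming the same Helsinki-NLP/opus-mt-ko-en pipeline used in app.py; the helper name maybe_translate and the demo call are illustrative and not part of the commit.

from transformers import pipeline

translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

def maybe_translate(prompt: str) -> str:
    # Treat the prompt as Korean if it contains Hangul jamo (U+3131-U+318E)
    # or precomposed Hangul syllables (U+AC00-U+D7A3), then translate to English.
    if any('\u3131' <= ch <= '\u318E' or '\uAC00' <= ch <= '\uD7A3' for ch in prompt):
        return translator(prompt, max_length=512)[0]['translation_text']
    return prompt

# Illustrative usage with one of the bundled example prompts.
print(maybe_translate('우주인 복장으로 기타를 치는 남자'))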
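The other structural change is that generation now runs inside try/except/finally, so cached CUDA memory is released even when sampling fails. A minimal sketch of that pattern follows, using plain PyTorch; the wrapper name run_safely is illustrative and not part of the commit.

import torch

def run_safely(fn, *args, **kwargs):
    # Mirror the error handling added to infer(): log and return None on failure,
    # and always release cached CUDA memory afterwards.
    try:
        return fn(*args, **kwargs)
    except Exception as e:
        print(f"Error occurred: {e}")
        return None
    finally:
        if torch.cuda.is_available():
            torch.cuda.empty_cache()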