Commit · defab3d
1 Parent(s): 4fc2798
add
app.py CHANGED
@@ -333,7 +333,7 @@ class BaseTrainer(object):
         return result


-@spaces.GPU(duration=
+@spaces.GPU(duration=140)
 def _warp(args,model, batch_data,joints,joint_mask_upper,joint_mask_hands,joint_mask_lower,use_trans,mean_upper,mean_hands,mean_lower,std_upper,std_hands,std_lower,trans_mean,trans_std):
     diffusion = create_gaussian_diffusion(use_ddim=args.use_ddim)
     args,model,vq_model_upper,vq_model_hands,vq_model_lower,mean_upper,mean_hands,mean_lower,std_upper,std_hands,std_lower,trans_mean,trans_std,vqvae_latent_scale=_warp_create_cuda_model(args,model,mean_upper,mean_hands,mean_lower,std_upper,std_hands,std_lower,trans_mean,trans_std)
@@ -777,7 +777,7 @@ demo = gr.Interface(
     title='SynTalker: Enabling Synergistic Full-Body Control in Prompt-Based Co-Speech Motion Generation',
     description="1. Upload your audio. <br/>\
                 2. Then, sit back and wait for the rendering to happen! This may take a while (e.g. 2-5 minutes) <br/>\
-                (The reason of running time so long is that provided GPU have an limitation in GPU running time, we must use CPU to handle some GPU tasks)
+                (The reason of running time so long is that provided GPU have an limitation in GPU running time, we must use CPU to handle some GPU tasks)<br/>\
                 3. After, you can view the videos. <br/>\
                 4. Notice that we use a fix face animation, our method only produce body motion. <br/>\
                 5. Use DDPM sample strategy will generate a better result, while it will take more inference time. \
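For context on the change above: in a Hugging Face ZeroGPU Space, the @spaces.GPU(duration=...) decorator reserves GPU time for the decorated call, so this commit raises the per-request GPU window to 140 seconds, and the second hunk only appends an HTML <br/> so the runtime note renders on its own line in the Gradio description. Below is a minimal sketch of how such a decorated function plugs into gr.Interface; the function name generate_motion and its audio-in / video-out signature are illustrative assumptions, not the actual SynTalker app code.

# Minimal ZeroGPU + Gradio sketch (illustrative; not the SynTalker app itself).
# Assumes the `spaces` and `gradio` packages, as provided on Hugging Face Spaces.
import spaces
import gradio as gr


@spaces.GPU(duration=140)  # request up to ~140 s of ZeroGPU time per call
def generate_motion(audio_path: str) -> str:
    """Hypothetical GPU-bound step: take an audio file path, return a rendered video path."""
    # ... load the model, run diffusion sampling, render the motion video ...
    return "result.mp4"


demo = gr.Interface(
    fn=generate_motion,
    inputs=gr.Audio(type="filepath"),
    outputs=gr.Video(),
    title="SynTalker: Enabling Synergistic Full-Body Control in Prompt-Based Co-Speech Motion Generation",
    description="1. Upload your audio. <br/>2. Wait for rendering (2-5 minutes). <br/>3. View the video.",
)

if __name__ == "__main__":
    demo.launch()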