Muhammad Taqi Raza
committed on
Commit
·
bd39eaa
1
Parent(s):
c4283d3
changing resolution
Browse files
- gradio_app.py +1 -1
- inference/v2v_data/demo.py +1 -1
gradio_app.py
CHANGED
@@ -71,7 +71,7 @@ def get_anchor_video(video_path, fps, num_frames, target_pose, mode,
|
|
71 |
"--fps", str(fps),
|
72 |
"--depth_inference_steps", str(depth_inference_steps),
|
73 |
"--depth_guidance_scale", str(depth_guidance_scale),
|
74 |
-
# "--near_far_estimated", near_far_estimated,
|
75 |
"--sampler_name", sampler_name,
|
76 |
"--diffusion_guidance_scale", str(diffusion_guidance_scale),
|
77 |
"--diffusion_inference_steps", str(diffusion_inference_steps),
|
|
|
71 |
"--fps", str(fps),
|
72 |
"--depth_inference_steps", str(depth_inference_steps),
|
73 |
"--depth_guidance_scale", str(depth_guidance_scale),
|
74 |
+
# "--near_far_estimated", str(near_far_estimated),
|
75 |
"--sampler_name", sampler_name,
|
76 |
"--diffusion_guidance_scale", str(diffusion_guidance_scale),
|
77 |
"--diffusion_inference_steps", str(diffusion_inference_steps),
|
inference/v2v_data/demo.py
CHANGED
@@ -13,7 +13,7 @@ import torch.nn.functional as F
|
|
13 |
from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
|
14 |
from qwen_vl_utils import process_vision_info
|
15 |
|
16 |
-
def get_center_crop_resolution(original_resoultion, target_aspect_ratio=(
|
17 |
target_h, target_w = target_aspect_ratio
|
18 |
aspect_ratio = target_w / target_h
|
19 |
|
|
|
13 |
from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
|
14 |
from qwen_vl_utils import process_vision_info
|
15 |
|
16 |
+
def get_center_crop_resolution(original_resoultion, target_aspect_ratio=(3, 4)):
|
17 |
target_h, target_w = target_aspect_ratio
|
18 |
aspect_ratio = target_w / target_h
|
19 |
|