# video-background-removal / green_process.py
'''
Replace video backgrounds with a solid green screen using BiRefNet segmentation.
Usage:
    python green_process.py Star_Rail_Tribbie_MMD_Videos_sp30 Star_Rail_Tribbie_MMD_Videos_30s_Green --fast_mode --max_workers=10
'''
import os
import argparse
import torch
from torchvision import transforms
from moviepy import VideoFileClip, ImageSequenceClip
from PIL import Image
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from transformers import AutoModelForImageSegmentation

# Set up device
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load both BiRefNet models
birefnet = AutoModelForImageSegmentation.from_pretrained("ZhengPeng7/BiRefNet", trust_remote_code=True)
birefnet.to(device)
birefnet_lite = AutoModelForImageSegmentation.from_pretrained("ZhengPeng7/BiRefNet_lite", trust_remote_code=True)
birefnet_lite.to(device)
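# BiRefNet_lite generally trades some mask quality for speed; it is selected via --fast_mode.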
# Image transformation pipeline
transform_image = transforms.Compose([
    transforms.Resize((768, 768)),
    transforms.ToTensor(),
    # ImageNet mean/std, matching BiRefNet's expected preprocessing
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

def process_frame(frame, fast_mode):
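    """Segment one RGB frame (H x W x 3 uint8 array) and composite it over a green background."""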
    try:
        pil_image = Image.fromarray(frame)
        # "#00FF00" is pure green; use "#000000" instead for a black background.
        processed_image = process(pil_image, "#00FF00", fast_mode)
        return np.array(processed_image)
    except Exception as e:
        print(f"Error processing frame: {e}")
        # Fall back to the original frame so the output video keeps its length.
        return frame

def process(image, bg, fast_mode=False):
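    """Composite `image` over a new background using a BiRefNet foreground mask.

    `bg` can be a hex color string such as "#00FF00", a PIL Image, or a path to an image file.
    """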
    image_size = image.size
    input_images = transform_image(image).unsqueeze(0).to(device)
    # Use the lighter model when fast_mode is requested
    model = birefnet_lite if fast_mode else birefnet
    with torch.no_grad():
        preds = model(input_images)[-1].sigmoid().cpu()
    pred = preds[0].squeeze()
    pred_pil = transforms.ToPILImage()(pred)
    # Resize the 768x768 prediction back to the original frame size
    mask = pred_pil.resize(image_size)
    if isinstance(bg, str) and bg.startswith("#"):
        # Hex color string, e.g. "#00FF00" -> (0, 255, 0)
        color_rgb = tuple(int(bg[i:i + 2], 16) for i in (1, 3, 5))
        background = Image.new("RGBA", image_size, color_rgb + (255,))
    elif isinstance(bg, Image.Image):
        background = bg.convert("RGBA").resize(image_size)
    else:
        background = Image.open(bg).convert("RGBA").resize(image_size)
    # Keep the foreground where the mask is white, the new background elsewhere
    image = Image.composite(image, background, mask)
    return image

def process_video(video_path, output_path, fast_mode=True, max_workers=10):
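    """Apply background replacement to every frame of one video and write the result to `output_path`."""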
    try:
        video = VideoFileClip(video_path)
        fps = video.fps
        audio = video.audio
        frames = list(video.iter_frames(fps=fps))
        processed_frames = []
        # Segment frames in parallel; results are collected in submission order,
        # so the output video keeps the original frame order.
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = [executor.submit(process_frame, frame, fast_mode) for frame in frames]
            for future in futures:
                processed_frames.append(future.result())
        processed_video = ImageSequenceClip(processed_frames, fps=fps)
        # Re-attach the original audio track
        processed_video = processed_video.with_audio(audio)
        processed_video.write_videofile(output_path, codec="libx264")
    except Exception as e:
        print(f"Error processing video {video_path}: {e}")

def main(input_folder, output_folder, fast_mode=True, max_workers=10):
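    """Process every .mp4/.avi/.mov file in `input_folder` and write results to `output_folder`."""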
    os.makedirs(output_folder, exist_ok=True)
    for video_file in os.listdir(input_folder):
        if video_file.endswith((".mp4", ".avi", ".mov")):
            video_path = os.path.join(input_folder, video_file)
            output_path = os.path.join(output_folder, video_file)
            print(f"Processing {video_path}...")
            process_video(video_path, output_path, fast_mode, max_workers)
            print(f"Finished processing {video_path}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Process videos to replace the background with green.")
    parser.add_argument("input_folder", type=str, help="Path to the folder containing input videos.")
    parser.add_argument("output_folder", type=str, help="Path to the folder where processed videos will be saved.")
    parser.add_argument("--fast_mode", action="store_true", help="Use BiRefNet_lite for faster processing.")
    parser.add_argument("--max_workers", type=int, default=10, help="Number of workers for parallel processing.")
    args = parser.parse_args()
    main(args.input_folder, args.output_folder, args.fast_mode, args.max_workers)