import cv2
import numpy as np
import gradio as gr
from tqdm import tqdm


def remove_green_screen(video_file, bg_image):
    """Replace the green-screen background of a video with a still image."""
    cap = cv2.VideoCapture(video_file)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Load the background once and match it to the video's frame size.
    bg = cv2.imread(bg_image)
    bg = cv2.resize(bg, (width, height))

    def process_frame(frame):
        # Detect green pixels in HSV space; keep everything except them.
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        green_mask = cv2.inRange(hsv, (36, 25, 25), (86, 255, 255))
        fg_mask = cv2.bitwise_not(green_mask)
        # Composite: original frame where the foreground mask is set,
        # background image where the green screen was.
        fg = cv2.bitwise_and(frame, frame, mask=fg_mask)
        bg_fill = cv2.bitwise_and(bg, bg, mask=green_mask)
        return cv2.add(fg, bg_fill)

    # Stream processed frames to an output file instead of keeping the whole
    # clip in memory; Gradio's Video output expects a file path.
    out_path = "output.mp4"
    writer = cv2.VideoWriter(
        out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height)
    )
    for _ in tqdm(range(frame_count)):
        success, frame = cap.read()
        if not success:
            continue
        writer.write(process_frame(frame))

    cap.release()
    writer.release()
    return out_path
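
# A minimal sketch of exercising the pipeline without the web UI, assuming
# hypothetical local files "clip.mp4" (green-screen footage) and "beach.jpg"
# (replacement background) exist:
#
#     result = remove_green_screen("clip.mp4", "beach.jpg")
#     print(result)  # path to the composited video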


# Gradio 3+ component API: both inputs reach the function as file paths.
iface = gr.Interface(
    fn=remove_green_screen,
    inputs=[
        gr.Video(label="Input video"),
        gr.Image(type="filepath", label="Background image"),
    ],
    outputs=gr.Video(label="Processed video"),
)

iface.launch()
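
# Optional: Gradio can expose a temporary public URL for the app, which is
# handy when testing from another machine:
#     iface.launch(share=True)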