|
import gradio as gr |
|
import cv2 |
|
import numpy as np |
|
from tqdm import tqdm |
|
|
|
|
|
def process_frame(frame, bg_image=None):
    """Replace the green-screen pixels of one BGR frame with a background image.

    Parameters
    ----------
    frame : np.ndarray
        BGR frame of shape (H, W, 3), as produced by cv2.VideoCapture.
    bg_image : str | np.ndarray | None
        Path to the background image, or an already-loaded BGR array.
        Required; the default exists only for signature compatibility.

    Returns
    -------
    np.ndarray
        Frame with green pixels replaced by the (resized) background.

    Raises
    ------
    ValueError
        If no background was supplied or it could not be read.
    """
    # Bug fix: the original body referenced a `bg_image` name that was never
    # defined in this scope (it was a parameter of remove_green_screen), so
    # every call raised NameError. It is now an explicit parameter.
    if bg_image is None:
        raise ValueError("bg_image is required (path or image array)")

    # Segment "green" in HSV space; hue ~36-86 covers typical chroma green.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_green = np.array([36, 25, 25])
    upper_green = np.array([86, 255, 255])
    mask = cv2.inRange(hsv, lower_green, upper_green)  # 255 where green
    mask_inv = cv2.bitwise_not(mask)                   # 255 where foreground

    # Accept either a filepath or an image array (Gradio's Image input
    # delivers an ndarray, for which cv2.imread would fail).
    bg = cv2.imread(bg_image) if isinstance(bg_image, str) else np.asarray(bg_image)
    if bg is None:
        raise ValueError(f"could not read background image: {bg_image!r}")
    bg = cv2.resize(bg, (frame.shape[1], frame.shape[0]))

    # Composite: background where the mask is green, original frame elsewhere.
    bg_masked = cv2.bitwise_and(bg, bg, mask=mask)
    fg_masked = cv2.bitwise_and(frame, frame, mask=mask_inv)
    return cv2.add(bg_masked, fg_masked)


def remove_green_screen(video, bg_image):
    """Replace the green screen of every frame of *video* with *bg_image*.

    Parameters
    ----------
    video : object | str
        Gradio upload object (with a ``.name`` temp-file path) or a plain path.
    bg_image : str | np.ndarray
        Path to the background image, or an already-loaded image array.

    Returns
    -------
    str
        Path of the written output video ('out.mp4').

    Raises
    ------
    ValueError
        If the input video cannot be opened.
    """
    # Gradio may hand over a tempfile wrapper (with .name) or a bare path.
    video_path = getattr(video, "name", video)
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"could not open video: {video_path!r}")

    fps = cap.get(cv2.CAP_PROP_FPS)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter('out.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))

    # Load the background once instead of re-reading it from disk per frame.
    bg = cv2.imread(bg_image) if isinstance(bg_image, str) else np.asarray(bg_image)

    try:
        # Stream frames straight to the writer instead of buffering the whole
        # decoded video in a list (the original held every frame in memory).
        for _ in tqdm(range(frame_count)):
            ret, frame = cap.read()
            if not ret:  # container frame counts can overestimate; stop early
                break
            out.write(process_frame(frame, bg))
    finally:
        # Release handles even if a frame fails to process.
        cap.release()
        out.release()

    return 'out.mp4'
|
|
|
|
|
def predict_video(video, bg_image):
    """Gradio handler: run green-screen removal and return the result path.

    Parameters
    ----------
    video : object | str
        Uploaded video (Gradio file object or path), forwarded unchanged.
    bg_image : str | np.ndarray
        Background image, forwarded unchanged.

    Returns
    -------
    str
        Filepath of the processed video.
    """
    # Bug fix: the original read the output file and returned raw bytes, but
    # a Gradio Video output component expects a filepath — returning bytes
    # broke playback and needlessly loaded the whole video into memory.
    return remove_green_screen(video, bg_image)
|
|
|
|
|
# ---------------------------------------------------------------------------
# Gradio UI wiring: two inputs (video + background image), one video output.
# Uses the legacy gr.inputs / gr.outputs component API.
# ---------------------------------------------------------------------------
inputs = [
    gr.inputs.Video(label="Video"),
    gr.inputs.Image(label="Background Image"),
]

outputs = gr.outputs.Video(label="Processed Video", type="mp4", source="file")

title = "Green Screen Video Remover"
description = "Remove the green background from a video and replace it with another image."
article = (
    "<p style='text-align: center'>"
    "<a href='https://huggingface.co/blog/how-to-build-a-web-app-for-a-transformer-in-pytorch'>"
    "How to Build a Web App for a Transformer in PyTorch</a></p>"
)

# Bundled sample inputs shown beneath the interface.
examples = [
    ["./input.mp4", "./bg.png"],
    ["./input2.mp4", "./bg2.png"],
]

demo = gr.Interface(
    predict_video,
    inputs,
    outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
    # NOTE(review): live=True re-runs the handler on every input change,
    # which is very heavy for full video processing — confirm intended.
    live=True,
)
demo.launch()
|
|