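# Gradio demo: replace the green-screen background of an uploaded video
# with a user-supplied background image, frame by frame, using OpenCV.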
import gradio as gr
import cv2
import numpy as np
from tqdm import tqdm
def process_frame(frame, bg):
    # Build a mask of the green-screen pixels in HSV space.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_green = np.array([36, 25, 25])
    upper_green = np.array([86, 255, 255])
    mask = cv2.inRange(hsv, lower_green, upper_green)
    mask_inv = cv2.bitwise_not(mask)
    # Take the background where the frame is green and the frame everywhere else.
    bg_masked = cv2.bitwise_and(bg, bg, mask=mask)
    fg_masked = cv2.bitwise_and(frame, frame, mask=mask_inv)
    return cv2.add(bg_masked, fg_masked)
def remove_green_screen(video, bg_image):
    # Gradio passes the uploaded video and background image as file paths.
    cap = cv2.VideoCapture(video)
    fps = cap.get(cv2.CAP_PROP_FPS)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter('out.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
    # Load the replacement background once and resize it to the video resolution.
    bg = cv2.resize(cv2.imread(bg_image), (width, height))
    for _ in tqdm(range(frame_count)):
        ret, frame = cap.read()
        if not ret:
            break
        out.write(process_frame(frame, bg))
    cap.release()
    out.release()
    return 'out.mp4'
def predict_video(video, bg_image):
    # The Video output expects a path to the rendered file, not raw bytes.
    return remove_green_screen(video, bg_image)
# Gradio 3+ components; the background image is delivered as a filepath so cv2.imread can read it.
inputs = [
    gr.Video(label="Video"),
    gr.Image(label="Background Image", type="filepath")
]
outputs = gr.Video(label="Processed Video")
title = "Green Screen Video Remover"
description = "Remove the green background from a video and replace it with another image."
article = "<p style='text-align: center'><a href='https://huggingface.co/blog/how-to-build-a-web-app-for-a-transformer-in-pytorch'>How to Build a Web App for a Transformer in PyTorch</a></p>"
examples = [
["./input.mp4", "./bg.png"],
["./input2.mp4", "./bg2.png"]
]
gr.Interface(predict_video, inputs, outputs, title=title, description=description, article=article, examples=examples, live=True).launch()