# app.py: Gradio demo that replaces a video's green screen with a user-supplied background image
import cv2
import numpy as np
import gradio as gr
def remove_green_screen(video_file, bg_image):
    # Load the background image
    bg = cv2.imread(bg_image)
    # Create a video capture object to read from the video file
    cap = cv2.VideoCapture(video_file)
    # Get the video dimensions and frame rate (fall back to 30 fps if unknown)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    # Resize the background image so it matches the video frame size
    bg = cv2.resize(bg, (width, height))
    # Create a video writer object to write the output to a new video file
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter("output.mp4", fourcc, fps, (width, height))
    # Loop over each frame in the video
    while True:
        # Read a frame from the video
        ret, frame = cap.read()
        # If we have reached the end of the video, break out of the loop
        if not ret:
            break
        # Convert the frame to HSV color space
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Define the range of green color in HSV
        lower_green = np.array([40, 50, 50])
        upper_green = np.array([90, 255, 255])
        # Create a mask for the green color range
        mask = cv2.inRange(hsv, lower_green, upper_green)
        # Invert the mask
        mask = cv2.bitwise_not(mask)
        # Apply the mask to the frame to remove the green screen
        fg = cv2.bitwise_and(frame, frame, mask=mask)
        # Mask the background image so it is visible only where the frame was green
        bg_mask = cv2.bitwise_not(mask)
        bg_mask = cv2.cvtColor(bg_mask, cv2.COLOR_GRAY2BGR)
        bg_fg = cv2.bitwise_and(bg, bg_mask)
        # Combine the foreground and background into the composited frame
        output = cv2.add(fg, bg_fg)
        # Write the output frame to the video file
        out.write(output)
    # Release the video capture and video writer objects
    cap.release()
    out.release()
    return "output.mp4"
# Define the input and output components for the app
video_file = gr.Video(label="Input Video")
bg_image = gr.Image(type="filepath", label="Background Image")  # pass a file path so cv2.imread can load it
output_file = gr.Video(label="Output Video")
# Create the app interface
iface = gr.Interface(fn=remove_green_screen, inputs=[video_file, bg_image], outputs=output_file)
# Launch the app
iface.launch()
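# Standalone usage sketch (file names are hypothetical): the core function can also be
# called outside Gradio, e.g.
#   remove_green_screen("greenscreen_clip.mp4", "beach.jpg")
# writes output.mp4 next to the script and returns its path.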