import mimetypes
import os
import tempfile

import cv2
import imageio.v2 as imageio
import streamlit as st
from PIL import Image

from deface.centerface import CenterFace
from deface.deface import anonymize_frame


def streamlit_video_deface_cli(uploaded_file, keep_audio):
    """Anonymize an uploaded video by shelling out to the `deface` CLI."""
    temp_video_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    processed_video_temp_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name

    # Persist the upload to disk so the CLI can read it.
    with open(temp_video_path, 'wb') as f:
        f.write(uploaded_file.read())

    if keep_audio:
        command = f"deface {temp_video_path} --keep-audio -o {processed_video_temp_path}"
    else:
        command = f"deface {temp_video_path} -o {processed_video_temp_path}"

    os.system(command)

    return processed_video_temp_path
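

# Illustrative alternative (a sketch, not called by main()): the same `deface`
# CLI invocation via subprocess.run with an argument list, which sidesteps
# shell-quoting problems if a path ever contains spaces. Like the function
# above, it assumes the `deface` executable is on PATH.
def streamlit_video_deface_cli_subprocess(uploaded_file, keep_audio):
    import subprocess  # local import keeps this sketch self-contained

    temp_video_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    processed_video_temp_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    with open(temp_video_path, 'wb') as f:
        f.write(uploaded_file.read())

    args = ["deface", temp_video_path, "-o", processed_video_temp_path]
    if keep_audio:
        args.insert(2, "--keep-audio")
    subprocess.run(args, check=True)  # raises CalledProcessError if deface fails
    return processed_video_temp_path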


def process_video_frame(frame, centerface, threshold, replacewith, mask_scale, ellipse, draw_scores, replaceimg=None, mosaicsize=20):
    """Detect faces in a single frame and anonymize the detections in place."""
    dets, _ = centerface(frame, threshold=threshold)
    anonymize_frame(dets, frame, mask_scale=mask_scale, replacewith=replacewith, ellipse=ellipse, draw_scores=draw_scores, replaceimg=replaceimg, mosaicsize=mosaicsize)
    return frame
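

# Quick illustrative usage of process_video_frame on a single image file
# (hypothetical filenames; the Streamlit flow below does not use this):
#
#     centerface = CenterFace()
#     frame = cv2.imread("example.jpg")
#     if frame is not None:
#         process_video_frame(frame, centerface, threshold=0.2, replacewith='blur',
#                             mask_scale=1.0, ellipse=True, draw_scores=False)
#         cv2.imwrite("example_anonymized.jpg", frame)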


def streamlit_video_detect(uploaded_file, centerface, threshold, replacewith, mask_scale, ellipse, draw_scores, enable_preview, keep_audio, replaceimg=None, mosaicsize=20):
    """Anonymize an uploaded video frame by frame and return the path of the processed file."""
    temp_video_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    processed_video_temp_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    output_with_audio_temp_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name

    # Persist the upload to disk so OpenCV (and later ffmpeg) can read it.
    with open(temp_video_path, 'wb') as f:
        f.write(uploaded_file.read())

    vidcap = cv2.VideoCapture(temp_video_path)

    width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

    out = cv2.VideoWriter(processed_video_temp_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))

    progress_text = "Processing video. Please wait."
    my_bar = st.progress(0, text=progress_text)
    frames_processed = 0

    while True:
        ret, frame = vidcap.read()
        if not ret:
            break

        processed_frame = process_video_frame(frame, centerface, threshold, replacewith, mask_scale, ellipse, draw_scores, replaceimg, mosaicsize)
        out.write(processed_frame)

        frames_processed += 1
        # CAP_PROP_FRAME_COUNT is only an estimate for some containers, so clamp
        # the value to keep st.progress within its 0-100 range.
        progress_percent = min(int((frames_processed / max(total_frames, 1)) * 100), 100)
        my_bar.progress(progress_percent, text=progress_text)

    vidcap.release()
    out.release()
    my_bar.empty()

    # Re-encode to H.264, since OpenCV's mp4v output often is not playable in the browser.
    reencoded_video_path = f"{os.path.splitext(processed_video_temp_path)[0]}_processed_.mp4"
    st.write("Re-encoding video. Please wait...")
    os.system(f"ffmpeg -y -i {processed_video_temp_path} -c:v libx264 {reencoded_video_path}")

    if keep_audio:
        st.write("Overlaying audio. Please wait...")
        command = (
            f"ffmpeg -y -i {reencoded_video_path} -i {temp_video_path} "
            f"-c:v copy -c:a aac -strict experimental {output_with_audio_temp_path}"
        )
        # os.system returns the command's exit status, so fall back to the
        # silent video if ffmpeg could not mux the original audio back in.
        if os.system(command) == 0:
            return output_with_audio_temp_path
        return reencoded_video_path

    return reencoded_video_path
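

# Small illustrative helper (an assumption, not part of the original flow): the
# re-encoding and audio steps above shell out to ffmpeg, so checking for the
# binary upfront with shutil.which gives a clearer error than a silent failure.
def ffmpeg_available():
    import shutil  # local import keeps this sketch self-contained
    return shutil.which("ffmpeg") is not None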


def streamlit_image_detect(uploaded_file, centerface, threshold, replacewith, mask_scale, ellipse, draw_scores, enable_preview, keep_metadata, replaceimg=None, mosaicsize=20):
    """Anonymize a single uploaded image and return it as a PIL image."""
    # imageio reads the uploaded file into an array that Image.fromarray can consume directly.
    frame = imageio.imread(uploaded_file)

    dets, _ = centerface(frame, threshold=threshold)
    anonymize_frame(dets, frame, mask_scale=mask_scale, replacewith=replacewith, ellipse=ellipse, draw_scores=draw_scores, replaceimg=replaceimg, mosaicsize=mosaicsize)

    result_img = Image.fromarray(frame)
    return result_img


def get_file_type(uploaded_file):
    mime = mimetypes.guess_type(uploaded_file.name)[0]
    if mime is None:
        return None
    if mime.startswith('video'):
        return 'video'
    if mime.startswith('image'):
        return 'image'
    return mime
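

# Example return values (based on standard mimetypes behaviour):
#   "clip.mp4"  -> 'video'
#   "photo.png" -> 'image'
#   "notes.txt" -> 'text/plain' (passed through; treated as unsupported in main)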


def main():
    st.title("Media Anonymizer")

    st.write("""
    You can upload images or videos and the application will automatically detect faces and blur them out.

    Audio is removed by default. Check the box below to keep audio.
    """)

    uploaded_files = st.file_uploader("Upload media files (images, videos)", accept_multiple_files=True)
    keep_audio = st.checkbox("Keep Audio")

    for uploaded_file in uploaded_files:
        file_type = get_file_type(uploaded_file)

        if file_type == 'image':
            centerface = CenterFace()
            result_img = streamlit_image_detect(uploaded_file, centerface, threshold=0.2, replacewith='blur', mask_scale=1.0, ellipse=True, draw_scores=False, enable_preview=False, keep_metadata=False)
            st.image(result_img, caption=f'Anonymized {uploaded_file.name}', use_column_width=True)
        elif file_type == 'video':
            centerface = CenterFace()
            processed_video_path = streamlit_video_detect(uploaded_file, centerface, threshold=0.2, replacewith='blur', mask_scale=1.0, ellipse=True, draw_scores=False, enable_preview=False, keep_audio=keep_audio)
            st.video(processed_video_path, format='video/mp4', start_time=0)
        else:
            st.write(f"Unsupported file type for {uploaded_file.name}")


if __name__ == "__main__":
    main()
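

# To run the app locally (assuming this script is saved as, e.g., app.py):
#
#     streamlit run app.py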