import mimetypes
import os
import tempfile

import cv2
import imageio.v2 as imageio
import streamlit as st
from PIL import Image

from deface.centerface import CenterFace
from deface.deface import anonymize_frame

def streamlit_video_deface_cli(uploaded_file, keep_audio):
    """Anonymize an uploaded video by shelling out to the `deface` command-line tool."""
    temp_video_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    processed_video_temp_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name

    # Persist the upload to disk so the external CLI can read it
    with open(temp_video_path, 'wb') as f:
        f.write(uploaded_file.read())

    if keep_audio:
        command = f"deface {temp_video_path} --keep-audio -o {processed_video_temp_path}"
    else:
        command = f"deface {temp_video_path} -o {processed_video_temp_path}"
    
    os.system(command)

    return processed_video_temp_path
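
# NOTE: streamlit_video_deface_cli() is an alternative path that delegates everything to the
# `deface` CLI; it is defined here but not called from main() below, which uses the
# frame-by-frame pipeline instead. A minimal sketch of how it could be swapped in
# (hypothetical wiring, not part of the current flow):
#
#   if file_type == 'video':
#       processed_video_path = streamlit_video_deface_cli(uploaded_file, keep_audio)
#       st.video(processed_video_path, format='video/mp4', start_time=0)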


def process_video_frame(frame, centerface, threshold, replacewith, mask_scale, ellipse, draw_scores, replaceimg=None, mosaicsize=20):
    """Detect faces in a single frame and anonymize them in place."""
    dets, _ = centerface(frame, threshold=threshold)
    anonymize_frame(dets, frame, mask_scale=mask_scale, replacewith=replacewith, ellipse=ellipse, draw_scores=draw_scores, replaceimg=replaceimg, mosaicsize=mosaicsize)
    return frame


def streamlit_video_detect(uploaded_file, centerface, threshold, replacewith, mask_scale, ellipse, draw_scores, enable_preview, keep_audio, replaceimg=None, mosaicsize=20):
    """Anonymize an uploaded video frame by frame with OpenCV and return the path to the processed file."""
    temp_video_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    processed_video_temp_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    output_with_audio_temp_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    with open(temp_video_path, 'wb') as f:
        f.write(uploaded_file.read())
    
    vidcap = cv2.VideoCapture(temp_video_path)
    
    # Get video properties
    width  = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps    = vidcap.get(cv2.CAP_PROP_FPS)
    total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))

    
    # Create a video writer object
    out = cv2.VideoWriter(processed_video_temp_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
    
    # Progress Bar setup
    progress_text = "Processing video. Please wait."
    my_bar = st.progress(0, text=progress_text)
    frames_processed = 0
    
    while True:
        ret, frame = vidcap.read()
        if not ret:
            break
        
        processed_frame = process_video_frame(frame, centerface, threshold, replacewith, mask_scale, ellipse, draw_scores, replaceimg, mosaicsize)
        out.write(processed_frame)

        # Progress Bar update
        frames_processed += 1
        progress_percent = int((frames_processed / total_frames) * 100)
        my_bar.progress(progress_percent, text=progress_text)
    
    vidcap.release()
    out.release()
    
    # Empty the progress bar after completion
    my_bar.empty()
    st.write("Re-encoding video. Please wait...")
    # Re-encode the mp4v output with libx264 so browsers (and st.video) can play it
    reencoded_path = f"{processed_video_temp_path.split('.')[0]}_processed_.mp4"
    os.system(f"ffmpeg -y -i {processed_video_temp_path} -c:v libx264 {reencoded_path}")

    # If keep_audio is checked, use FFmpeg to copy the original audio track onto the processed video
    if keep_audio:
        st.write("Overlaying audio. Please wait...")
        command = (
            f"ffmpeg -y -i {reencoded_path} -i {temp_video_path} "
            f"-c:v copy -c:a aac -strict experimental {output_with_audio_temp_path}"
        )
        # os.system does not raise on a failed command, so check the exit code and fall
        # back to the video-only file if the audio mux fails
        if os.system(command) == 0:
            return output_with_audio_temp_path
    return reencoded_path
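
# NOTE: the temporary files above are created with delete=False and are never removed, so
# intermediate copies accumulate in the temp directory. A minimal cleanup sketch (assumption:
# the intermediate files are no longer needed once the final path has been returned and
# displayed):
#
#   for path in (temp_video_path, processed_video_temp_path):
#       try:
#           os.remove(path)
#       except OSError:
#           pass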


def streamlit_image_detect(uploaded_file, centerface, threshold, replacewith, mask_scale, ellipse, draw_scores, enable_preview, keep_metadata, replaceimg=None, mosaicsize=20):
    """Anonymize an uploaded image and return it as a PIL.Image."""
    # Read the uploaded image into a numpy array
    frame = imageio.imread(uploaded_file)

    # Metadata (EXIF) handling is not implemented here; keep_metadata is currently ignored
    
    dets, _ = centerface(frame, threshold=threshold)
    anonymize_frame(dets, frame, mask_scale=mask_scale, replacewith=replacewith, ellipse=ellipse, draw_scores=draw_scores, replaceimg=replaceimg, mosaicsize=mosaicsize)

    # Convert numpy array back to a PIL.Image so that we can display/save it easily in Streamlit
    result_img = Image.fromarray(frame)
    return result_img
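
# NOTE: because keep_metadata is ignored, the anonymized image is returned without the
# original EXIF data. A minimal sketch of how EXIF could be carried over for JPEG uploads
# (assumption: Pillow exposes the raw EXIF blob via Image.info for JPEGs, and the output
# filename 'anonymized.jpg' is illustrative only):
#
#   uploaded_file.seek(0)
#   exif_bytes = Image.open(uploaded_file).info.get('exif')
#   if exif_bytes:
#       result_img.save('anonymized.jpg', exif=exif_bytes)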

def get_file_type(uploaded_file):
    """Classify an upload as 'video' or 'image' from the MIME type guessed off its filename."""
    mime = mimetypes.guess_type(uploaded_file.name)[0]
    if mime is None:
        return None
    if mime.startswith('video'):
        return 'video'
    if mime.startswith('image'):
        return 'image'
    return mime

def main():
    st.title("β˜’οΈπŸ§‘πŸ“· Media Anonymizer πŸ“ΉπŸ§‘β€πŸ¦³β˜’οΈ")
    # description
    st.write("""
    You can upload images or videos, and the application will automatically detect faces and blur them out.

    Audio is removed by default. Check the box below to keep audio.
    """)
    
    uploaded_files = st.file_uploader("Upload media files (images, videos)", accept_multiple_files=True)
    keep_audio = st.checkbox("Keep Audio")

    for uploaded_file in uploaded_files:
        file_type = get_file_type(uploaded_file)
        
        if file_type == 'image':
            # Initialize the CenterFace face detector
            centerface = CenterFace()
            result_img = streamlit_image_detect(uploaded_file, centerface, threshold=0.2, replacewith='blur', mask_scale=1.0, ellipse=True, draw_scores=False, enable_preview=False, keep_metadata=False)
            st.image(result_img, caption=f'Anonymized {uploaded_file.name}', use_column_width=True)
        elif file_type == 'video':
            centerface = CenterFace()
            processed_video_path = streamlit_video_detect(uploaded_file, centerface, threshold=0.2, replacewith='blur', mask_scale=1.0, ellipse=True, draw_scores=False, enable_preview=False, keep_audio=keep_audio)
            st.video(processed_video_path, format='video/mp4', start_time=0)
        else:
            st.write(f"Unsupported file type for {uploaded_file.name}")

if __name__ == "__main__":
    main()
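
# Usage sketch (assumptions: the `deface` package, OpenCV, imageio, Pillow, and Streamlit are
# installed, ffmpeg is available on PATH, and the script filename below is illustrative):
#
#   pip install streamlit deface opencv-python imageio Pillow
#   streamlit run media_anonymizer.py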