from fawkes.protection import Fawkes
from fawkes.utils import Faces, reverse_process_cloaked
from fawkes.differentiator import FawkesMaskGeneration
import tensorflow as tf
import numpy as np
import gradio as gr
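
# Face-crop size fed to the feature extractors, and the intensity-range /
# preprocessing mode passed through to the mask generator.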
IMG_SIZE = 112
PREPROCESS = 'raw'

# Instantiate all three protection levels at boot so the model files are downloaded pre-emptively.
fwks_l = Fawkes("extractor_2", '0', 1, mode='low')
fwks_m = Fawkes("extractor_2", '0', 1, mode='mid')
fwks_h = Fawkes("extractor_2", '0', 1, mode='high')
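

# Run the Fawkes cloak optimizer on a batch of cropped faces. target_emb is the
# optional target embedding to mimic; this demo always passes None.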
def generate_cloak_images(protector, image_X, target_emb=None):
    cloaked_image_X = protector.compute(image_X, target_emb)
    return cloaked_image_X
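

# Gradio handler: convert the upload to an array, align the face(s), build or reuse
# the mask generator for the chosen protection level, and return the cloaked image.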
def predict(img, level, th=0.04, sd=1e7, lr=10, max_step=500, batch_size=1, format='png',
            separate_target=True, debug=False, no_align=False, exp="", maximize=True,
            save_last_on_failed=True, progress=gr.Progress(track_tqdm=True)):
    img = img.convert('RGB')
    img = tf.keras.utils.img_to_array(img)
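
    # Select the pre-built Fawkes instance matching the requested protection level.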
    if level == 'low':
        fwks = fwks_l
    elif level == 'mid':
        fwks = fwks_m
    elif level == 'high':
        fwks = fwks_h

    current_param = "-".join([str(x) for x in [fwks.th, sd, fwks.lr, fwks.max_step, batch_size, format,
                                               separate_target, debug]])
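
    # Detect and align faces in the uploaded image; bail out if none are found.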
    faces = Faces(['./Current Face'], [img], fwks.aligner, verbose=0, no_align=False)
    original_images = faces.cropped_faces

    if len(original_images) == 0:
        raise Exception("No face detected.")
    original_images = np.array(original_images)
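
    # Rebuild the mask generator only when its parameters changed since the previous
    # request; otherwise the cached protector is reused.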
    if current_param != fwks.protector_param:
        fwks.protector_param = current_param
        if fwks.protector is not None:
            del fwks.protector
        if batch_size == -1:
            batch_size = len(original_images)
        fwks.protector = FawkesMaskGeneration(fwks.feature_extractors_ls,
                                              batch_size=batch_size,
                                              mimic_img=True,
                                              intensity_range=PREPROCESS,
                                              initial_const=sd,
                                              learning_rate=fwks.lr,
                                              max_iterations=fwks.max_step,
                                              l_threshold=fwks.th,
                                              verbose=0,
                                              maximize=maximize,
                                              keep_final=False,
                                              image_shape=(IMG_SIZE, IMG_SIZE, 3),
                                              loss_method='features',
                                              tanh_process=True,
                                              save_last_on_failed=save_last_on_failed,
                                              )
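
    # Compute the cloaking perturbations, then merge the cloaked crops back into the
    # original photo; the last merged image is returned to the UI.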
    protected_images = generate_cloak_images(fwks.protector, original_images)
    faces.cloaked_cropped_faces = protected_images

    final_images, _ = faces.merge_faces(
        reverse_process_cloaked(protected_images, preprocess=PREPROCESS),
        reverse_process_cloaked(original_images, preprocess=PREPROCESS))

    return final_images[-1].astype(np.uint8)
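

# Minimal Gradio UI: upload a photo, pick a protection level, receive the cloaked image.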
gr.Interface(fn=predict,
             inputs=[gr.components.Image(type='pil'),
                     gr.components.Radio(["low", "mid", "high"], label="Protection Level")],
             outputs=gr.components.Image(type="numpy"),
             allow_flagging="never").launch(show_error=True, quiet=False)