diff --git a/.assets/models/GFPGANv1.4.pth b/.assets/models/GFPGANv1.4.pth
new file mode 100644
index 0000000000000000000000000000000000000000..afedb5c7e826056840c9cc183f2c6f0186fd17ba
--- /dev/null
+++ b/.assets/models/GFPGANv1.4.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2cd4703ab14f4d01fd1383a8a8b266f9a5833dacee8e6a79d3bf21a1b6be5ad
+size 348632874
diff --git a/.assets/models/RealESRGAN_x4plus.pth b/.assets/models/RealESRGAN_x4plus.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9ddced536d07803300536317fef662bb499bca71
--- /dev/null
+++ b/.assets/models/RealESRGAN_x4plus.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fa0d38905f75ac06eb49a7951b426670021be3018265fd191d2125df9d682f1
+size 67040989
diff --git a/.assets/models/inswapper_128.onnx b/.assets/models/inswapper_128.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..cb672b799d74fdf7ab8b172a1b1d78411f6400f5
--- /dev/null
+++ b/.assets/models/inswapper_128.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4a3f08c753cb72d04e10aa0f7dbe3deebbf39567d4ead6dce08e98aa49e16af
+size 554253681
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000000000000000000000000000000000000..b88a39dcf36b90aae0763caaee5e3afe0cc4159f
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,8 @@
+root = true
+
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_size = 4
+indent_style = tab
+trim_trailing_whitespace = true
diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000000000000000000000000000000000000..dea8fc8c0abdfb1ef100bbe489456ec4c04f3073
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,3 @@
+[flake8]
+select = E3, E4, F
+per-file-ignores = facefusion/core.py:E402,F401
\ No newline at end of file
diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..2448af21d6983b6c4e360a4d6f7b1bd9c3a4cab7 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+.github/preview.png filter=lfs diff=lfs merge=lfs -text
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000000000000000000000000000000000000..718d8a695a46024d6d04f1b183f42c1d51b02a46
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,2 @@
+github: henryruhs
+custom: https://paypal.me/henryruhs
diff --git a/.github/preview.png b/.github/preview.png
new file mode 100644
index 0000000000000000000000000000000000000000..21d800e8525d241fc07238fcd28fc314bd1f2e2b
--- /dev/null
+++ b/.github/preview.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90f9882b1a3fd51272d42384f5f7a84082749eaf2fb874125b942af077045801
+size 1099968
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a0016eab827b234954490212b1b687c904a5c85b
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,34 @@
+name: ci
+
+on: [ push, pull_request ]
+
+jobs:
+ lint:
+  runs-on: ubuntu-latest
+  steps:
+  - name: Checkout
+    uses: actions/checkout@v2
+  - name: Set up Python 3.10
+    uses: actions/setup-python@v2
+    with:
+     python-version: '3.10'
+  - run: pip install flake8
+  - run: pip install mypy
+  - run: flake8 run.py facefusion tests
+  - run: mypy run.py facefusion tests
+ test:
+  strategy:
+   matrix:
+    os: [ macos-latest, ubuntu-latest, windows-latest ]
+  runs-on: ${{ matrix.os }}
+  steps:
+  - name: Checkout
+    uses: actions/checkout@v2
+  - name: Set up ffmpeg
+    uses: FedericoCarboni/setup-ffmpeg@v2
+  - name: Set up Python 3.10
+    uses: actions/setup-python@v2
+    with:
+     python-version: '3.10'
+  - run: pip install -r requirements-ci.txt
+  - run: pytest
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..cea2670e3acb95e2a444c634d927227149c6cf17
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+.assets
+.idea
+.vscode
+
diff --git a/1685074910001_vtqikl_2_0-boy-dp-image-77-720x704.jpg b/1685074910001_vtqikl_2_0-boy-dp-image-77-720x704.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..26e92483629da7903094509f0978da1e9ba1599f
Binary files /dev/null and b/1685074910001_vtqikl_2_0-boy-dp-image-77-720x704.jpg differ
diff --git a/1685074910001_vtqikl_2_0-images 2.jpeg b/1685074910001_vtqikl_2_0-images 2.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..207390aa18d050d398c4b4a437c41f4f7d95979d
Binary files /dev/null and b/1685074910001_vtqikl_2_0-images 2.jpeg differ
diff --git a/1685074910001_vtqikl_2_0-images.jpg b/1685074910001_vtqikl_2_0-images.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..fd41ede0dda621bee6077561dac4b1ec773f4f2b
Binary files /dev/null and b/1685074910001_vtqikl_2_0-images.jpg differ
diff --git a/DeepFakeAI/__init__.py b/DeepFakeAI/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/DeepFakeAI/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..568166eb25f4837fa6a81e00f941a91c48e4c679
Binary files /dev/null and b/DeepFakeAI/__pycache__/__init__.cpython-310.pyc differ
diff --git a/DeepFakeAI/__pycache__/capturer.cpython-310.pyc b/DeepFakeAI/__pycache__/capturer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5d80c2f73ab16d42170e9aa2d07e0eeb8752448e
Binary files /dev/null and b/DeepFakeAI/__pycache__/capturer.cpython-310.pyc differ
diff --git a/DeepFakeAI/__pycache__/choices.cpython-310.pyc b/DeepFakeAI/__pycache__/choices.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74879608ab48ca7837a52aaaeb9fc2c63b0cb32e
Binary files /dev/null and b/DeepFakeAI/__pycache__/choices.cpython-310.pyc differ
diff --git a/DeepFakeAI/__pycache__/core.cpython-310.pyc b/DeepFakeAI/__pycache__/core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ea71515cbe72df3079ce654afa30710e610a377
Binary files /dev/null and b/DeepFakeAI/__pycache__/core.cpython-310.pyc differ
diff --git a/DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc b/DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c11bfcc38886e10f8798454cf33a535af014060
Binary files /dev/null and b/DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc differ
diff --git a/DeepFakeAI/__pycache__/face_reference.cpython-310.pyc b/DeepFakeAI/__pycache__/face_reference.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b03fdcd1e92bfbd12fdca43d27f9fccdc83bc16b
Binary files /dev/null and b/DeepFakeAI/__pycache__/face_reference.cpython-310.pyc differ
diff --git a/DeepFakeAI/__pycache__/globals.cpython-310.pyc b/DeepFakeAI/__pycache__/globals.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6ba2c8bd91f7559eb4569f0f3d4bec929da3705
Binary files /dev/null and b/DeepFakeAI/__pycache__/globals.cpython-310.pyc differ
diff --git a/DeepFakeAI/__pycache__/metadata.cpython-310.pyc b/DeepFakeAI/__pycache__/metadata.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e8d6b56c563d81d746e1731d5632e6c2b6a17885
Binary files /dev/null and b/DeepFakeAI/__pycache__/metadata.cpython-310.pyc differ
diff --git a/DeepFakeAI/__pycache__/predictor.cpython-310.pyc b/DeepFakeAI/__pycache__/predictor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..18c3a742353e4579d2a5f780b1f08963cd660f6c
Binary files /dev/null and b/DeepFakeAI/__pycache__/predictor.cpython-310.pyc differ
diff --git a/DeepFakeAI/__pycache__/typing.cpython-310.pyc b/DeepFakeAI/__pycache__/typing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..57e4cd024929c3c809bc9d4a5565b245b72ad489
Binary files /dev/null and b/DeepFakeAI/__pycache__/typing.cpython-310.pyc differ
diff --git a/DeepFakeAI/__pycache__/utilities.cpython-310.pyc b/DeepFakeAI/__pycache__/utilities.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e89b852e9303a63edca277e4463bd9227713f709
Binary files /dev/null and b/DeepFakeAI/__pycache__/utilities.cpython-310.pyc differ
diff --git a/DeepFakeAI/__pycache__/wording.cpython-310.pyc b/DeepFakeAI/__pycache__/wording.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..370e7d920c50ba8f188ebb966fae358a999e06ae
Binary files /dev/null and b/DeepFakeAI/__pycache__/wording.cpython-310.pyc differ
diff --git a/DeepFakeAI/capturer.py b/DeepFakeAI/capturer.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f01b1eecbf6076ff77ecaf476b7d980328ed131
--- /dev/null
+++ b/DeepFakeAI/capturer.py
@@ -0,0 +1,22 @@
+from typing import Optional
+import cv2
+
+from facefusion.typing import Frame
+
+
+def get_video_frame(video_path : str, frame_number : int = 0) -> Optional[Frame]:
+	capture = cv2.VideoCapture(video_path)
+	frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT)
+	capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, max(frame_number - 1, 0)))
+	has_frame, frame = capture.read()
+	capture.release()
+	if has_frame:
+		return frame
+	return None
+
+
+def get_video_frame_total(video_path : str) -> int:
+	capture = cv2.VideoCapture(video_path)
+	video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
+	capture.release()
+	return video_frame_total
diff --git a/DeepFakeAI/choices.py b/DeepFakeAI/choices.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff1312ac1d473cb05ae12cf2d2af2ecac211b56f
--- /dev/null
+++ b/DeepFakeAI/choices.py
@@ -0,0 +1,10 @@
+from typing import List
+
+from facefusion.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder
+
+face_recognition : List[FaceRecognition] = [ 'reference', 'many' ]
+face_analyser_direction : List[FaceAnalyserDirection] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small' ]
+face_analyser_age : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ]
+face_analyser_gender : List[FaceAnalyserGender] = [ 'male', 'female' ]
+temp_frame_format : List[TempFrameFormat] = [ 'jpg', 'png' ]
+output_video_encoder : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ]
diff --git a/DeepFakeAI/core.py b/DeepFakeAI/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3b423a4f2f2e0cfaeacbea394cfa5a1d9fe744d
--- /dev/null
+++ b/DeepFakeAI/core.py
@@ -0,0 +1,222 @@
+#!/usr/bin/env python3
+
+import os
+# single thread doubles cuda performance
+os.environ['OMP_NUM_THREADS'] = '1'
+# reduce tensorflow log level
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+import sys
+import warnings
+from typing import List
+import platform
+import signal
+import shutil
+import argparse
+import onnxruntime
+import tensorflow
+
+import facefusion.choices
+import facefusion.globals
+from facefusion import wording, metadata
+from facefusion.predictor import predict_image, predict_video
+from facefusion.processors.frame.core import get_frame_processors_modules
+from facefusion.utilities import is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, normalize_output_path, list_module_names, decode_execution_providers, encode_execution_providers
+
+warnings.filterwarnings('ignore', category = FutureWarning, module = 'insightface')
+warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')
+
+
+def parse_args() -> None:
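+	# clean up temporary files and exit gracefully on SIGINT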
+	signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
+	program = argparse.ArgumentParser(formatter_class = lambda prog: argparse.HelpFormatter(prog, max_help_position = 120))
+	program.add_argument('-s', '--source', help = wording.get('source_help'), dest = 'source_path')
+	program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path')
+	program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path')
+	program.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(list_module_names('facefusion/processors/frame/modules'))), dest = 'frame_processors', default = ['face_swapper'], nargs='+')
+	program.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('facefusion/uis/layouts'))), dest = 'ui_layouts', default = ['default'], nargs='+')
+	program.add_argument('--keep-fps', help = wording.get('keep_fps_help'), dest = 'keep_fps', action='store_true')
+	program.add_argument('--keep-temp', help = wording.get('keep_temp_help'), dest = 'keep_temp', action='store_true')
+	program.add_argument('--skip-audio', help = wording.get('skip_audio_help'), dest = 'skip_audio', action='store_true')
+	program.add_argument('--face-recognition', help = wording.get('face_recognition_help'), dest = 'face_recognition', default = 'reference', choices = facefusion.choices.face_recognition)
+	program.add_argument('--face-analyser-direction', help = wording.get('face_analyser_direction_help'), dest = 'face_analyser_direction', default = 'left-right', choices = facefusion.choices.face_analyser_direction)
+	program.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), dest = 'face_analyser_age', choices = facefusion.choices.face_analyser_age)
+	program.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), dest = 'face_analyser_gender', choices = facefusion.choices.face_analyser_gender)
+	program.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), dest = 'reference_face_position', type = int, default = 0)
+	program.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), dest = 'reference_face_distance', type = float, default = 1.5)
+	program.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), dest = 'reference_frame_number', type = int, default = 0)
+	program.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), dest = 'trim_frame_start', type = int)
+	program.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), dest = 'trim_frame_end', type = int)
+	program.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), dest = 'temp_frame_format', default = 'jpg', choices = facefusion.choices.temp_frame_format)
+	program.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), dest = 'temp_frame_quality', type = int, default = 100, choices = range(101), metavar = '[0-100]')
+	program.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), dest = 'output_video_encoder', default = 'libx264', choices = facefusion.choices.output_video_encoder)
+	program.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), dest = 'output_video_quality', type = int, default = 90, choices = range(101), metavar = '[0-100]')
+	program.add_argument('--max-memory', help = wording.get('max_memory_help'), dest = 'max_memory', type = int)
+	program.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = 'cpu'), dest = 'execution_providers', default = ['cpu'], choices = suggest_execution_providers_choices(), nargs='+')
+	program.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), dest = 'execution_thread_count', type = int, default = suggest_execution_thread_count_default())
+	program.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), dest = 'execution_queue_count', type = int, default = 1)
+	program.add_argument('-v', '--version', action='version', version = metadata.get('name') + ' ' + metadata.get('version'))
+
+	args = program.parse_args()
+
+	facefusion.globals.source_path = args.source_path
+	facefusion.globals.target_path = args.target_path
+	facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, args.output_path)
+	facefusion.globals.headless = facefusion.globals.source_path is not None and facefusion.globals.target_path is not None and facefusion.globals.output_path is not None
+	facefusion.globals.frame_processors = args.frame_processors
+	facefusion.globals.ui_layouts = args.ui_layouts
+	facefusion.globals.keep_fps = args.keep_fps
+	facefusion.globals.keep_temp = args.keep_temp
+	facefusion.globals.skip_audio = args.skip_audio
+	facefusion.globals.face_recognition = args.face_recognition
+	facefusion.globals.face_analyser_direction = args.face_analyser_direction
+	facefusion.globals.face_analyser_age = args.face_analyser_age
+	facefusion.globals.face_analyser_gender = args.face_analyser_gender
+	facefusion.globals.reference_face_position = args.reference_face_position
+	facefusion.globals.reference_frame_number = args.reference_frame_number
+	facefusion.globals.reference_face_distance = args.reference_face_distance
+	facefusion.globals.trim_frame_start = args.trim_frame_start
+	facefusion.globals.trim_frame_end = args.trim_frame_end
+	facefusion.globals.temp_frame_format = args.temp_frame_format
+	facefusion.globals.temp_frame_quality = args.temp_frame_quality
+	facefusion.globals.output_video_encoder = args.output_video_encoder
+	facefusion.globals.output_video_quality = args.output_video_quality
+	facefusion.globals.max_memory = args.max_memory
+	facefusion.globals.execution_providers = decode_execution_providers(args.execution_providers)
+	facefusion.globals.execution_thread_count = args.execution_thread_count
+	facefusion.globals.execution_queue_count = args.execution_queue_count
+
+
+def suggest_execution_providers_choices() -> List[str]:
+	return encode_execution_providers(onnxruntime.get_available_providers())
+
+
+def suggest_execution_thread_count_default() -> int:
+	if 'CUDAExecutionProvider' in onnxruntime.get_available_providers():
+		return 8
+	return 1
+
+
+def limit_resources() -> None:
+	# prevent tensorflow memory leak
+	gpus = tensorflow.config.experimental.list_physical_devices('GPU')
+	for gpu in gpus:
+		tensorflow.config.experimental.set_virtual_device_configuration(gpu, [
+			tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit = 1024)
+		])
+	# limit memory usage
+	if facefusion.globals.max_memory:
+		memory = facefusion.globals.max_memory * 1024 ** 3
+		if platform.system().lower() == 'darwin':
+			memory = facefusion.globals.max_memory * 1024 ** 6
+		if platform.system().lower() == 'windows':
+			import ctypes
+			kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined]
+			kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
+		else:
+			import resource
+			resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
+
+
+def update_status(message : str, scope : str = 'FACEFUSION.CORE') -> None:
+	print('[' + scope + '] ' + message)
+
+
+def pre_check() -> bool:
+	if sys.version_info < (3, 10):
+		update_status(wording.get('python_not_supported').format(version = '3.10'))
+		return False
+	if not shutil.which('ffmpeg'):
+		update_status(wording.get('ffmpeg_not_installed'))
+		return False
+	return True
+
+
+def process_image() -> None:
+	if predict_image(facefusion.globals.target_path):
+		return
+	shutil.copy2(facefusion.globals.target_path, facefusion.globals.output_path)
+	# process frame
+	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+		update_status(wording.get('processing'), frame_processor_module.NAME)
+		frame_processor_module.process_image(facefusion.globals.source_path, facefusion.globals.output_path, facefusion.globals.output_path)
+		frame_processor_module.post_process()
+	# validate image
+	if is_image(facefusion.globals.output_path):
+		update_status(wording.get('processing_image_succeed'))
+	else:
+		update_status(wording.get('processing_image_failed'))
+
+
+def process_video() -> None:
+	if predict_video(facefusion.globals.target_path):
+		return
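+	# keep the original frame rate only when --keep-fps is set, otherwise fall back to 25 fps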
+	fps = detect_fps(facefusion.globals.target_path) if facefusion.globals.keep_fps else 25.0
+	update_status(wording.get('creating_temp'))
+	create_temp(facefusion.globals.target_path)
+	# extract frames
+	update_status(wording.get('extracting_frames_fps').format(fps = fps))
+	extract_frames(facefusion.globals.target_path, fps)
+	# process frame
+	temp_frame_paths = get_temp_frame_paths(facefusion.globals.target_path)
+	if temp_frame_paths:
+		for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+			update_status(wording.get('processing'), frame_processor_module.NAME)
+			frame_processor_module.process_video(facefusion.globals.source_path, temp_frame_paths)
+			frame_processor_module.post_process()
+	else:
+		update_status(wording.get('temp_frames_not_found'))
+		return
+	# create video
+	update_status(wording.get('creating_video_fps').format(fps = fps))
+	if not create_video(facefusion.globals.target_path, fps):
+		update_status(wording.get('creating_video_failed'))
+		return
+	# handle audio
+	if facefusion.globals.skip_audio:
+		update_status(wording.get('skipping_audio'))
+		move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
+	else:
+		update_status(wording.get('restoring_audio'))
+		restore_audio(facefusion.globals.target_path, facefusion.globals.output_path)
+	# clear temp
+	update_status(wording.get('clearing_temp'))
+	clear_temp(facefusion.globals.target_path)
+	# validate video
+	if is_video(facefusion.globals.output_path):
+		update_status(wording.get('processing_video_succeed'))
+	else:
+		update_status(wording.get('processing_video_failed'))
+
+
+def conditional_process() -> None:
+	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+		if not frame_processor_module.pre_process():
+			return
+	if is_image(facefusion.globals.target_path):
+		process_image()
+	if is_video(facefusion.globals.target_path):
+		process_video()
+
+
+def run() -> None:
+	parse_args()
+	limit_resources()
+	# pre check
+	if not pre_check():
+		return
+	for frame_processor in get_frame_processors_modules(facefusion.globals.frame_processors):
+		if not frame_processor.pre_check():
+			return
+	# process or launch
+	if facefusion.globals.headless:
+		conditional_process()
+	else:
+		import facefusion.uis.core as ui
+
+		ui.launch()
+
+
+def destroy() -> None:
+	if facefusion.globals.target_path:
+		clear_temp(facefusion.globals.target_path)
+	sys.exit()
diff --git a/DeepFakeAI/face_analyser.py b/DeepFakeAI/face_analyser.py
new file mode 100644
index 0000000000000000000000000000000000000000..1929840eb8efac04b9a223d9b03296f105ae77d7
--- /dev/null
+++ b/DeepFakeAI/face_analyser.py
@@ -0,0 +1,106 @@
+import threading
+from typing import Any, Optional, List
+import insightface
+import numpy
+
+import facefusion.globals
+from facefusion.typing import Frame, Face, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender
+
+FACE_ANALYSER = None
+THREAD_LOCK = threading.Lock()
+
+
+def get_face_analyser() -> Any:
+	global FACE_ANALYSER
+
+	with THREAD_LOCK:
+		if FACE_ANALYSER is None:
+			FACE_ANALYSER = insightface.app.FaceAnalysis(name = 'buffalo_l', providers = facefusion.globals.execution_providers)
+			FACE_ANALYSER.prepare(ctx_id = 0)
+	return FACE_ANALYSER
+
+
+def clear_face_analyser() -> None:
+	global FACE_ANALYSER
+
+	FACE_ANALYSER = None
+
+
+def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]:
+	many_faces = get_many_faces(frame)
+	if many_faces:
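+		# fall back to the last face when the requested position is out of range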
+		try:
+			return many_faces[position]
+		except IndexError:
+			return many_faces[-1]
+	return None
+
+
+def get_many_faces(frame : Frame) -> List[Face]:
+	try:
+		faces = get_face_analyser().get(frame)
+		if facefusion.globals.face_analyser_direction:
+			faces = sort_by_direction(faces, facefusion.globals.face_analyser_direction)
+		if facefusion.globals.face_analyser_age:
+			faces = filter_by_age(faces, facefusion.globals.face_analyser_age)
+		if facefusion.globals.face_analyser_gender:
+			faces = filter_by_gender(faces, facefusion.globals.face_analyser_gender)
+		return faces
+	except (AttributeError, ValueError):
+		return []
+
+
+def find_similar_faces(frame : Frame, reference_face : Face, face_distance : float) -> List[Face]:
+	many_faces = get_many_faces(frame)
+	similar_faces = []
+	if many_faces:
+		for face in many_faces:
+			if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
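+				# squared euclidean distance between the normed embeddings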
+				current_face_distance = numpy.sum(numpy.square(face.normed_embedding - reference_face.normed_embedding))
+				if current_face_distance < face_distance:
+					similar_faces.append(face)
+	return similar_faces
+
+
+def sort_by_direction(faces : List[Face], direction : FaceAnalyserDirection) -> List[Face]:
+	if direction == 'left-right':
+		return sorted(faces, key = lambda face: face['bbox'][0])
+	if direction == 'right-left':
+		return sorted(faces, key = lambda face: face['bbox'][0], reverse = True)
+	if direction == 'top-bottom':
+		return sorted(faces, key = lambda face: face['bbox'][1])
+	if direction == 'bottom-top':
+		return sorted(faces, key = lambda face: face['bbox'][1], reverse = True)
+	if direction == 'small-large':
+		return sorted(faces, key = lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1]))
+	if direction == 'large-small':
+		return sorted(faces, key = lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1]), reverse = True)
+	return faces
+
+
+def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]:
+	filter_faces = []
+	for face in faces:
+		if face['age'] < 13 and age == 'child':
+			filter_faces.append(face)
+		elif face['age'] < 19 and age == 'teen':
+			filter_faces.append(face)
+		elif face['age'] < 60 and age == 'adult':
+			filter_faces.append(face)
+		elif face['age'] > 59 and age == 'senior':
+			filter_faces.append(face)
+	return filter_faces
+
+
+def filter_by_gender(faces : List[Face], gender : FaceAnalyserGender) -> List[Face]:
+	filter_faces = []
+	for face in faces:
+		if face['gender'] == 1 and gender == 'male':
+			filter_faces.append(face)
+		if face['gender'] == 0 and gender == 'female':
+			filter_faces.append(face)
+	return filter_faces
+
+
+def get_faces_total(frame : Frame) -> int:
+	return len(get_many_faces(frame))
diff --git a/DeepFakeAI/face_reference.py b/DeepFakeAI/face_reference.py
new file mode 100644
index 0000000000000000000000000000000000000000..72281fe6ad8dfaa7d35382011686761752077a94
--- /dev/null
+++ b/DeepFakeAI/face_reference.py
@@ -0,0 +1,21 @@
+from typing import Optional
+
+from facefusion.typing import Face
+
+FACE_REFERENCE = None
+
+
+def get_face_reference() -> Optional[Face]:
+	return FACE_REFERENCE
+
+
+def set_face_reference(face : Face) -> None:
+	global FACE_REFERENCE
+
+	FACE_REFERENCE = face
+
+
+def clear_face_reference() -> None:
+	global FACE_REFERENCE
+
+	FACE_REFERENCE = None
diff --git a/DeepFakeAI/globals.py b/DeepFakeAI/globals.py
new file mode 100644
index 0000000000000000000000000000000000000000..d88ec95e34345f2a528e4e78e29cc1ce8af845a8
--- /dev/null
+++ b/DeepFakeAI/globals.py
@@ -0,0 +1,30 @@
+from typing import List, Optional
+
+from facefusion.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat
+
+source_path : Optional[str] = None
+target_path : Optional[str] = None
+output_path : Optional[str] = None
+headless : Optional[bool] = None
+frame_processors : List[str] = []
+ui_layouts : List[str] = []
+keep_fps : Optional[bool] = None
+keep_temp : Optional[bool] = None
+skip_audio : Optional[bool] = None
+face_recognition : Optional[FaceRecognition] = None
+face_analyser_direction : Optional[FaceAnalyserDirection] = None
+face_analyser_age : Optional[FaceAnalyserAge] = None
+face_analyser_gender : Optional[FaceAnalyserGender] = None
+reference_face_position : Optional[int] = None
+reference_frame_number : Optional[int] = None
+reference_face_distance : Optional[float] = None
+trim_frame_start : Optional[int] = None
+trim_frame_end : Optional[int] = None
+temp_frame_format : Optional[TempFrameFormat] = None
+temp_frame_quality : Optional[int] = None
+output_video_encoder : Optional[str] = None
+output_video_quality : Optional[int] = None
+max_memory : Optional[int] = None
+execution_providers : List[str] = []
+execution_thread_count : Optional[int] = None
+execution_queue_count : Optional[int] = None
diff --git a/DeepFakeAI/metadata.py b/DeepFakeAI/metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6f1ac56f45ba1c9c276ed2c179962a92b59b998
--- /dev/null
+++ b/DeepFakeAI/metadata.py
@@ -0,0 +1,13 @@
+METADATA =\
+{
+	'name': 'FaceFusion',
+	'description': 'Next generation face swapper and enhancer',
+	'version': '1.0.0',
+	'license': 'MIT',
+	'author': 'Henry Ruhs',
+	'url': 'https://facefusion.io'
+}
+
+
+def get(key : str) -> str:
+	return METADATA[key]
diff --git a/DeepFakeAI/predictor.py b/DeepFakeAI/predictor.py
new file mode 100644
index 0000000000000000000000000000000000000000..75b6ed4d328db9f991bbeb7c1ae425a62b4fbacb
--- /dev/null
+++ b/DeepFakeAI/predictor.py
@@ -0,0 +1,43 @@
+import threading
+import numpy
+import opennsfw2
+from PIL import Image
+from keras import Model
+
+from facefusion.typing import Frame
+
+PREDICTOR = None
+THREAD_LOCK = threading.Lock()
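+# probability threshold above which a frame or target is treated as NSFW and skipped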
+MAX_PROBABILITY = 0.75
+
+
+def get_predictor() -> Model:
+	global PREDICTOR
+
+	with THREAD_LOCK:
+		if PREDICTOR is None:
+			PREDICTOR = opennsfw2.make_open_nsfw_model()
+	return PREDICTOR
+
+
+def clear_predictor() -> None:
+	global PREDICTOR
+
+	PREDICTOR = None
+
+
+def predict_frame(target_frame : Frame) -> bool:
+	image = Image.fromarray(target_frame)
+	image = opennsfw2.preprocess_image(image, opennsfw2.Preprocessing.YAHOO)
+	views = numpy.expand_dims(image, axis = 0)
+	_, probability = get_predictor().predict(views)[0]
+	return probability > MAX_PROBABILITY
+
+
+def predict_image(target_path : str) -> bool:
+	return opennsfw2.predict_image(target_path) > MAX_PROBABILITY
+
+
+def predict_video(target_path : str) -> bool:
+	_, probabilities = opennsfw2.predict_video_frames(video_path = target_path, frame_interval = 100)
+	return any(probability > MAX_PROBABILITY for probability in probabilities)
diff --git a/DeepFakeAI/processors/__init__.py b/DeepFakeAI/processors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..85a762e3da95b4ee66eb6d3bb914fb3db928c50f
Binary files /dev/null and b/DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc differ
diff --git a/DeepFakeAI/processors/frame/__init__.py b/DeepFakeAI/processors/frame/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..195ebe452a6f6bbc778c791cfce0e0247bdf6d71
Binary files /dev/null and b/DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc differ
diff --git a/DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc b/DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e2bc471d7788483c8f1a04adb250bd3d99a7ecf
Binary files /dev/null and b/DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc differ
diff --git a/DeepFakeAI/processors/frame/core.py b/DeepFakeAI/processors/frame/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..73165e2f4eecbf6993b984ce85abcf16ed4b3657
--- /dev/null
+++ b/DeepFakeAI/processors/frame/core.py
@@ -0,0 +1,113 @@
+import os
+import sys
+import importlib
+import psutil
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from queue import Queue
+from types import ModuleType
+from typing import Any, List, Callable
+from tqdm import tqdm
+
+import facefusion.globals
+from facefusion import wording
+
+FRAME_PROCESSORS_MODULES : List[ModuleType] = []
+FRAME_PROCESSORS_METHODS =\
+[
+	'get_frame_processor',
+	'clear_frame_processor',
+	'pre_check',
+	'pre_process',
+	'process_frame',
+	'process_frames',
+	'process_image',
+	'process_video',
+	'post_process'
+]
+
+
+def load_frame_processor_module(frame_processor : str) -> Any:
+	try:
+		frame_processor_module = importlib.import_module('facefusion.processors.frame.modules.' + frame_processor)
+		for method_name in FRAME_PROCESSORS_METHODS:
+			if not hasattr(frame_processor_module, method_name):
+				raise NotImplementedError
+	except ModuleNotFoundError:
+		sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor))
+	except NotImplementedError:
+		sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor))
+	return frame_processor_module
+
+
+def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]:
+	global FRAME_PROCESSORS_MODULES
+
+	if not FRAME_PROCESSORS_MODULES:
+		for frame_processor in frame_processors:
+			frame_processor_module = load_frame_processor_module(frame_processor)
+			FRAME_PROCESSORS_MODULES.append(frame_processor_module)
+	return FRAME_PROCESSORS_MODULES
+
+
+def clear_frame_processors_modules() -> None:
+	global FRAME_PROCESSORS_MODULES
+
+	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+		frame_processor_module.clear_frame_processor()
+	FRAME_PROCESSORS_MODULES = []
+
+
+def multi_process_frame(source_path : str, temp_frame_paths : List[str], process_frames: Callable[[str, List[str], Any], None], update: Callable[[], None]) -> None:
+	with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor:
+		futures = []
+		queue = create_queue(temp_frame_paths)
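+		# number of frames each future picks from the queue, scaled by the execution queue count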
+		queue_per_future = max(len(temp_frame_paths) // facefusion.globals.execution_thread_count * facefusion.globals.execution_queue_count, 1)
+		while not queue.empty():
+			future = executor.submit(process_frames, source_path, pick_queue(queue, queue_per_future), update)
+			futures.append(future)
+		for future in as_completed(futures):
+			future.result()
+
+
+def create_queue(temp_frame_paths : List[str]) -> Queue[str]:
+	queue: Queue[str] = Queue()
+	for frame_path in temp_frame_paths:
+		queue.put(frame_path)
+	return queue
+
+
+def pick_queue(queue : Queue[str], queue_per_future : int) -> List[str]:
+	queues = []
+	for _ in range(queue_per_future):
+		if not queue.empty():
+			queues.append(queue.get())
+	return queues
+
+
+def process_video(source_path : str, frame_paths : List[str], process_frames : Callable[[str, List[str], Any], None]) -> None:
+	progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
+	total = len(frame_paths)
+	with tqdm(total = total, desc = wording.get('processing'), unit = 'frame', dynamic_ncols = True, bar_format = progress_bar_format) as progress:
+		multi_process_frame(source_path, frame_paths, process_frames, lambda: update_progress(progress))
+
+
+def update_progress(progress : Any = None) -> None:
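+	# report resident memory usage in GB alongside the execution settings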
+	process = psutil.Process(os.getpid())
+	memory_usage = process.memory_info().rss / 1024 / 1024 / 1024
+	progress.set_postfix(
+	{
+		'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB',
+		'execution_providers': facefusion.globals.execution_providers,
+		'execution_thread_count': facefusion.globals.execution_thread_count,
+		'execution_queue_count': facefusion.globals.execution_queue_count
+	})
+	progress.refresh()
+	progress.update(1)
+
+
+def get_device() -> str:
+	if 'CUDAExecutionProvider' in facefusion.globals.execution_providers:
+		return 'cuda'
+	if 'CoreMLExecutionProvider' in facefusion.globals.execution_providers:
+		return 'mps'
+	return 'cpu'
diff --git a/DeepFakeAI/processors/frame/modules/__init__.py b/DeepFakeAI/processors/frame/modules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..310f8f765e398fff96cf1b7e8a5f376285e2ca27
Binary files /dev/null and b/DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc differ
diff --git a/DeepFakeAI/processors/frame/modules/__pycache__/face_enhancer.cpython-310.pyc b/DeepFakeAI/processors/frame/modules/__pycache__/face_enhancer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..760d5b04441440c2bf157b758a5fcfb6acad66b2
Binary files /dev/null and b/DeepFakeAI/processors/frame/modules/__pycache__/face_enhancer.cpython-310.pyc differ
diff --git a/DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc b/DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dbab8feea0852b5de97608cf8b8de5a4bd174cc5
Binary files /dev/null and b/DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc differ
diff --git a/DeepFakeAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-310.pyc b/DeepFakeAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a8def0df810e473832662a5628af2ad9613b28f1
Binary files /dev/null and b/DeepFakeAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-310.pyc differ
diff --git a/DeepFakeAI/processors/frame/modules/face_enhancer.py b/DeepFakeAI/processors/frame/modules/face_enhancer.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c076125c9daef8e9f1b5199998619e10dba8d4e
--- /dev/null
+++ b/DeepFakeAI/processors/frame/modules/face_enhancer.py
@@ -0,0 +1,100 @@
+from typing import Any, List, Callable
+import cv2
+import threading
+from gfpgan.utils import GFPGANer
+
+import facefusion.globals
+import facefusion.processors.frame.core as frame_processors
+from facefusion import wording
+from facefusion.core import update_status
+from facefusion.face_analyser import get_many_faces
+from facefusion.typing import Frame, Face
+from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video
+
+FRAME_PROCESSOR = None
+THREAD_SEMAPHORE = threading.Semaphore()
+THREAD_LOCK = threading.Lock()
+NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_ENHANCER'
+
+
+def get_frame_processor() -> Any:
+	global FRAME_PROCESSOR
+
+	with THREAD_LOCK:
+		if FRAME_PROCESSOR is None:
+			model_path = resolve_relative_path('../.assets/models/GFPGANv1.4.pth')
+			FRAME_PROCESSOR = GFPGANer(
+				model_path = model_path,
+				upscale = 1,
+				device = frame_processors.get_device()
+			)
+	return FRAME_PROCESSOR
+
+
+def clear_frame_processor() -> None:
+	global FRAME_PROCESSOR
+
+	FRAME_PROCESSOR = None
+
+
+def pre_check() -> bool:
+	download_directory_path = resolve_relative_path('../.assets/models')
+	conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/GFPGANv1.4.pth'])
+	return True
+
+
+def pre_process() -> bool:
+	if not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
+		update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
+		return False
+	return True
+
+
+def post_process() -> None:
+	clear_frame_processor()
+
+
+def enhance_face(target_face : Face, temp_frame : Frame) -> Frame:
+	start_x, start_y, end_x, end_y = map(int, target_face['bbox'])
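+	# expand the bounding box by half of its width and height on each side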
+	padding_x = int((end_x - start_x) * 0.5)
+	padding_y = int((end_y - start_y) * 0.5)
+	start_x = max(0, start_x - padding_x)
+	start_y = max(0, start_y - padding_y)
+	end_x = max(0, end_x + padding_x)
+	end_y = max(0, end_y + padding_y)
+	crop_frame = temp_frame[start_y:end_y, start_x:end_x]
+	if crop_frame.size:
+		with THREAD_SEMAPHORE:
+			_, _, crop_frame = get_frame_processor().enhance(
+				crop_frame,
+				paste_back = True
+			)
+		temp_frame[start_y:end_y, start_x:end_x] = crop_frame
+	return temp_frame
+
+
+def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
+	many_faces = get_many_faces(temp_frame)
+	if many_faces:
+		for target_face in many_faces:
+			temp_frame = enhance_face(target_face, temp_frame)
+	return temp_frame
+
+
+def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None:
+	for temp_frame_path in temp_frame_paths:
+		temp_frame = cv2.imread(temp_frame_path)
+		result_frame = process_frame(None, None, temp_frame)
+		cv2.imwrite(temp_frame_path, result_frame)
+		if update:
+			update()
+
+
+def process_image(source_path : str, target_path : str, output_path : str) -> None:
+	target_frame = cv2.imread(target_path)
+	result_frame = process_frame(None, None, target_frame)
+	cv2.imwrite(output_path, result_frame)
+
+
+def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
+	frame_processors.process_video(None, temp_frame_paths, process_frames)
diff --git a/DeepFakeAI/processors/frame/modules/face_swapper.py b/DeepFakeAI/processors/frame/modules/face_swapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..003bebb4173155aca2b77683e273547a9080e380
--- /dev/null
+++ b/DeepFakeAI/processors/frame/modules/face_swapper.py
@@ -0,0 +1,105 @@
+from typing import Any, List, Callable
+import cv2
+import insightface
+import threading
+
+import facefusion.globals
+import facefusion.processors.frame.core as frame_processors
+from facefusion import wording
+from facefusion.core import update_status
+from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces
+from facefusion.face_reference import get_face_reference, set_face_reference
+from facefusion.typing import Face, Frame
+from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video
+
+FRAME_PROCESSOR = None
+THREAD_LOCK = threading.Lock()
+NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_SWAPPER'
+
+
+def get_frame_processor() -> Any:
+	global FRAME_PROCESSOR
+
+	with THREAD_LOCK:
+		if FRAME_PROCESSOR is None:
+			model_path = resolve_relative_path('../.assets/models/inswapper_128.onnx')
+			FRAME_PROCESSOR = insightface.model_zoo.get_model(model_path, providers = facefusion.globals.execution_providers)
+	return FRAME_PROCESSOR
+
+
+def clear_frame_processor() -> None:
+	global FRAME_PROCESSOR
+
+	FRAME_PROCESSOR = None
+
+
+def pre_check() -> bool:
+	download_directory_path = resolve_relative_path('../.assets/models')
+	conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx'])
+	return True
+
+
+def pre_process() -> bool:
+	if not is_image(facefusion.globals.source_path):
+		update_status(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
+		return False
+	elif not get_one_face(cv2.imread(facefusion.globals.source_path)):
+		update_status(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
+		return False
+	if not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
+		update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
+		return False
+	return True
+
+
+def post_process() -> None:
+	clear_frame_processor()
+
+
+def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
+	return get_frame_processor().get(temp_frame, target_face, source_face, paste_back = True)
+
+
+def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
+	if 'reference' in facefusion.globals.face_recognition:
+		similar_faces = find_similar_faces(temp_frame, reference_face, facefusion.globals.reference_face_distance)
+		if similar_faces:
+			for similar_face in similar_faces:
+				temp_frame = swap_face(source_face, similar_face, temp_frame)
+	if 'many' in facefusion.globals.face_recognition:
+		many_faces = get_many_faces(temp_frame)
+		if many_faces:
+			for target_face in many_faces:
+				temp_frame = swap_face(source_face, target_face, temp_frame)
+	return temp_frame
+
+
+def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None:
+	source_face = get_one_face(cv2.imread(source_path))
+	reference_face = get_face_reference() if 'reference' in facefusion.globals.face_recognition else None
+	for temp_frame_path in temp_frame_paths:
+		temp_frame = cv2.imread(temp_frame_path)
+		result_frame = process_frame(source_face, reference_face, temp_frame)
+		cv2.imwrite(temp_frame_path, result_frame)
+		if update:
+			update()
+
+
+def process_image(source_path : str, target_path : str, output_path : str) -> None:
+	source_face = get_one_face(cv2.imread(source_path))
+	target_frame = cv2.imread(target_path)
+	reference_face = get_one_face(target_frame, facefusion.globals.reference_face_position) if 'reference' in facefusion.globals.face_recognition else None
+	result_frame = process_frame(source_face, reference_face, target_frame)
+	cv2.imwrite(output_path, result_frame)
+
+
+def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
+	conditional_set_face_reference(temp_frame_paths)
+	frame_processors.process_video(source_path, temp_frame_paths, process_frames)
+
+
+def conditional_set_face_reference(temp_frame_paths : List[str]) -> None:
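+	# cache the reference face from the selected frame on the first run only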
+	if 'reference' in facefusion.globals.face_recognition and not get_face_reference():
+		reference_frame = cv2.imread(temp_frame_paths[facefusion.globals.reference_frame_number])
+		reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
+		set_face_reference(reference_face)
diff --git a/DeepFakeAI/processors/frame/modules/frame_enhancer.py b/DeepFakeAI/processors/frame/modules/frame_enhancer.py
new file mode 100644
index 0000000000000000000000000000000000000000..21f51ab944a8e79f94e348aa46988c5b0cbd9076
--- /dev/null
+++ b/DeepFakeAI/processors/frame/modules/frame_enhancer.py
@@ -0,0 +1,88 @@
+from typing import Any, List, Callable
+import cv2
+import threading
+from basicsr.archs.rrdbnet_arch import RRDBNet
+from realesrgan import RealESRGANer
+
+import facefusion.processors.frame.core as frame_processors
+from facefusion.typing import Frame, Face
+from facefusion.utilities import conditional_download, resolve_relative_path
+
+FRAME_PROCESSOR = None
+THREAD_SEMAPHORE = threading.Semaphore()
+THREAD_LOCK = threading.Lock()
+NAME = 'FACEFUSION.FRAME_PROCESSOR.FRAME_ENHANCER'
+
+
+def get_frame_processor() -> Any:
+	global FRAME_PROCESSOR
+
+	with THREAD_LOCK:
+		if FRAME_PROCESSOR is None:
+			model_path = resolve_relative_path('../.assets/models/RealESRGAN_x4plus.pth')
+			FRAME_PROCESSOR = RealESRGANer(
+				model_path = model_path,
+				model = RRDBNet(
+					num_in_ch = 3,
+					num_out_ch = 3,
+					num_feat = 64,
+					num_block = 23,
+					num_grow_ch = 32,
+					scale = 4
+				),
+				device = frame_processors.get_device(),
+				tile = 512,
+				tile_pad = 32,
+				pre_pad = 0,
+				scale = 4
+			)
+	return FRAME_PROCESSOR
+
+
+def clear_frame_processor() -> None:
+	global FRAME_PROCESSOR
+
+	FRAME_PROCESSOR = None
+
+
+def pre_check() -> bool:
+	download_directory_path = resolve_relative_path('../.assets/models')
+	conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/RealESRGAN_x4plus.pth'])
+	return True
+
+
+def pre_process() -> bool:
+	return True
+
+
+def post_process() -> None:
+	clear_frame_processor()
+
+
+def enhance_frame(temp_frame : Frame) -> Frame:
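+	# enhance at 4x internally and resize the result back to the input resolution (outscale = 1)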
+	with THREAD_SEMAPHORE:
+		temp_frame, _ = get_frame_processor().enhance(temp_frame, outscale = 1)
+	return temp_frame
+
+
+def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
+	return enhance_frame(temp_frame)
+
+
+def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None:
+	for temp_frame_path in temp_frame_paths:
+		temp_frame = cv2.imread(temp_frame_path)
+		result_frame = process_frame(None, None, temp_frame)
+		cv2.imwrite(temp_frame_path, result_frame)
+		if update:
+			update()
+
+
+def process_image(source_path : str, target_path : str, output_path : str) -> None:
+	target_frame = cv2.imread(target_path)
+	result = process_frame(None, None, target_frame)
+	cv2.imwrite(output_path, result)
+
+
+def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
+	frame_processors.process_video(None, temp_frame_paths, process_frames)
diff --git a/DeepFakeAI/typing.py b/DeepFakeAI/typing.py
new file mode 100644
index 0000000000000000000000000000000000000000..74f2b8746172ce2d58705f073a45c2276766ce60
--- /dev/null
+++ b/DeepFakeAI/typing.py
@@ -0,0 +1,13 @@
+from typing import Any, Literal
+from insightface.app.common import Face
+import numpy
+
+Face = Face
+Frame = numpy.ndarray[Any, Any]
+
+FaceRecognition = Literal[ 'reference', 'many' ]
+FaceAnalyserDirection = Literal[ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small' ]
+FaceAnalyserAge = Literal[ 'child', 'teen', 'adult', 'senior' ]
+FaceAnalyserGender = Literal[ 'male', 'female' ]
+TempFrameFormat = Literal[ 'jpg', 'png' ]
+OutputVideoEncoder = Literal[ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ]
diff --git a/DeepFakeAI/uis/__init__.py b/DeepFakeAI/uis/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/DeepFakeAI/uis/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/uis/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88430855f8a7e101b63ccd87cc52762c294c0764
Binary files /dev/null and b/DeepFakeAI/uis/__pycache__/__init__.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/__pycache__/core.cpython-310.pyc b/DeepFakeAI/uis/__pycache__/core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e540a0b285a982ad80e719250e7533852c1e230e
Binary files /dev/null and b/DeepFakeAI/uis/__pycache__/core.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/__pycache__/typing.cpython-310.pyc b/DeepFakeAI/uis/__pycache__/typing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c309212d3fd4ff4717aac2c0abd1c6fd48b482d
Binary files /dev/null and b/DeepFakeAI/uis/__pycache__/typing.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__init__.py b/DeepFakeAI/uis/components/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/DeepFakeAI/uis/components/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..218689ccabd6afffc212796346d5c3953e4abebe
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/__init__.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__pycache__/about.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/about.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7dac7b1d459f492fe041b50527fb798f4bf8a680
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/about.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__pycache__/execution.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/execution.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..313f2d8fef5006fa7f276e0ac8df65395f569bf8
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/execution.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__pycache__/face_analyser.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/face_analyser.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..73dd9e4add8205157105f8ae03fbd228d299f41c
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/face_analyser.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__pycache__/face_selector.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/face_selector.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a500abd49fc6bed6cbfa537b1618dc87a2b1e3ff
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/face_selector.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__pycache__/output.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/output.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9dc2510cf2b96b90984d973e2e552bfd7365fb06
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/output.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__pycache__/output_settings.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/output_settings.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..983cef80c185ebafaa252c052dc62b9e658c714d
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/output_settings.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__pycache__/preview.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/preview.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b6891e222567ec066938462796f526016e43a5f
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/preview.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__pycache__/processors.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/processors.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ac92c9fe9641e37ec3a7c1b066b5c75d2904401
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/processors.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__pycache__/settings.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/settings.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ab9b19a6293d2dc6ba2a0f1e6953418676494d6
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/settings.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__pycache__/source.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/source.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f71c2e4aa379575ec3c6ebe81e32a0cde1ca821a
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/source.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__pycache__/target.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/target.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab096ba1051920658b1589104c1adacf5ee0fe03
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/target.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__pycache__/temp_frame.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/temp_frame.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e075b585ca581f4ac9ca7ee8ae3330dcfb55b51
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/temp_frame.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/__pycache__/trim_frame.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/trim_frame.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..24efa8cfb068eb2437deaadd6c459c5b47b1cd4c
Binary files /dev/null and b/DeepFakeAI/uis/components/__pycache__/trim_frame.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/components/about.py b/DeepFakeAI/uis/components/about.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6caffaf324d8e12dceaaeca2528ee4ecc01ecf8
--- /dev/null
+++ b/DeepFakeAI/uis/components/about.py
@@ -0,0 +1,13 @@
+from typing import Optional
+import gradio
+
+from facefusion import metadata
+
+ABOUT_HTML : Optional[gradio.HTML] = None
+
+
+def render() -> None:
+	global ABOUT_HTML
+
+	with gradio.Box():
+		ABOUT_HTML = gradio.HTML('<center><a href="' + metadata.get('url') + '">' + metadata.get('name') + ' ' + metadata.get('version') + '</a></center>')
diff --git a/DeepFakeAI/uis/components/benchmark.py b/DeepFakeAI/uis/components/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..b56958e29169d1f0510da3a7a8a1704ce8542a29
--- /dev/null
+++ b/DeepFakeAI/uis/components/benchmark.py
@@ -0,0 +1,116 @@
+from typing import Any, Optional, List
+import time
+import tempfile
+import statistics
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.capturer import get_video_frame_total
+from facefusion.core import conditional_process
+from facefusion.uis.typing import Update
+from facefusion.utilities import normalize_output_path, clear_temp
+
+BENCHMARK_RESULT_DATAFRAME : Optional[gradio.Dataframe] = None
+BENCHMARK_CYCLES_SLIDER : Optional[gradio.Slider] = None
+BENCHMARK_START_BUTTON : Optional[gradio.Button] = None
+BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None
+
+
+def render() -> None:
+	global BENCHMARK_RESULT_DATAFRAME
+	global BENCHMARK_CYCLES_SLIDER
+	global BENCHMARK_START_BUTTON
+	global BENCHMARK_CLEAR_BUTTON
+
+	with gradio.Box():
+		BENCHMARK_RESULT_DATAFRAME = gradio.Dataframe(
+			label = wording.get('benchmark_result_dataframe_label'),
+			headers =
+			[
+				'target_path',
+				'benchmark_cycles',
+				'average_run',
+				'fastest_run',
+				'slowest_run',
+				'relative_fps'
+			],
+			col_count = (6, 'fixed'),
+			row_count = (7, 'fixed'),
+			datatype =
+			[
+				'str',
+				'number',
+				'number',
+				'number',
+				'number',
+				'number'
+			]
+		)
+	BENCHMARK_CYCLES_SLIDER = gradio.Slider(
+		label = wording.get('benchmark_cycles_slider_label'),
+		minimum = 1,
+		step = 1,
+		value = 3,
+		maximum = 10
+	)
+	with gradio.Row():
+		BENCHMARK_START_BUTTON = gradio.Button(wording.get('start_button_label'))
+		BENCHMARK_CLEAR_BUTTON = gradio.Button(wording.get('clear_button_label'))
+
+
+def listen() -> None:
+	BENCHMARK_START_BUTTON.click(update, inputs = BENCHMARK_CYCLES_SLIDER, outputs = BENCHMARK_RESULT_DATAFRAME)
+	BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULT_DATAFRAME)
+
+
+def update(benchmark_cycles : int) -> Update:
+	facefusion.globals.source_path = '.assets/examples/source.jpg'
+	target_paths =\
+	[
+		'.assets/examples/target-240p.mp4',
+		'.assets/examples/target-360p.mp4',
+		'.assets/examples/target-540p.mp4',
+		'.assets/examples/target-720p.mp4',
+		'.assets/examples/target-1080p.mp4',
+		'.assets/examples/target-1440p.mp4',
+		'.assets/examples/target-2160p.mp4'
+	]
+	value = [ benchmark(target_path, benchmark_cycles) for target_path in target_paths ]
+	return gradio.update(value = value)
+
+
+def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
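+	# the loop below runs one extra cycle and discards its result, so the first pass acts as a warm-up and does not skew the statistics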
+	process_times = []
+	total_fps = 0.0
+	for i in range(benchmark_cycles + 1):
+		facefusion.globals.target_path = target_path
+		facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, tempfile.gettempdir())
+		video_frame_total = get_video_frame_total(facefusion.globals.target_path)
+		start_time = time.perf_counter()
+		conditional_process()
+		end_time = time.perf_counter()
+		process_time = end_time - start_time
+		fps = video_frame_total / process_time
+		if i > 0:
+			process_times.append(process_time)
+			total_fps += fps
+	average_run = round(statistics.mean(process_times), 2)
+	fastest_run = round(min(process_times), 2)
+	slowest_run = round(max(process_times), 2)
+	relative_fps = round(total_fps / benchmark_cycles, 2)
+	return\
+	[
+		facefusion.globals.target_path,
+		benchmark_cycles,
+		average_run,
+		fastest_run,
+		slowest_run,
+		relative_fps
+	]
+
+
+def clear() -> Update:
+	if facefusion.globals.target_path:
+		clear_temp(facefusion.globals.target_path)
+	return gradio.update(value = None)
diff --git a/DeepFakeAI/uis/components/execution.py b/DeepFakeAI/uis/components/execution.py
new file mode 100644
index 0000000000000000000000000000000000000000..78abc1dfe2a813d8c747d975089191243c560541
--- /dev/null
+++ b/DeepFakeAI/uis/components/execution.py
@@ -0,0 +1,64 @@
+from typing import List, Optional
+import gradio
+import onnxruntime
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.face_analyser import clear_face_analyser
+from facefusion.processors.frame.core import clear_frame_processors_modules
+from facefusion.uis.typing import Update
+from facefusion.utilities import encode_execution_providers, decode_execution_providers
+
+EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None
+EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global EXECUTION_PROVIDERS_CHECKBOX_GROUP
+	global EXECUTION_THREAD_COUNT_SLIDER
+	global EXECUTION_QUEUE_COUNT_SLIDER
+
+	with gradio.Box():
+		EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+			label = wording.get('execution_providers_checkbox_group_label'),
+			choices = encode_execution_providers(onnxruntime.get_available_providers()),
+			value = encode_execution_providers(facefusion.globals.execution_providers)
+		)
+		EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider(
+			label = wording.get('execution_thread_count_slider_label'),
+			value = facefusion.globals.execution_thread_count,
+			step = 1,
+			minimum = 1,
+			maximum = 128
+		)
+		EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider(
+			label = wording.get('execution_queue_count_slider_label'),
+			value = facefusion.globals.execution_queue_count,
+			step = 1,
+			minimum = 1,
+			maximum = 16
+		)
+
+
+def listen() -> None:
+	EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP)
+	EXECUTION_THREAD_COUNT_SLIDER.change(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER, outputs = EXECUTION_THREAD_COUNT_SLIDER)
+	EXECUTION_QUEUE_COUNT_SLIDER.change(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER, outputs = EXECUTION_QUEUE_COUNT_SLIDER)
+
+
+def update_execution_providers(execution_providers : List[str]) -> Update:
+	clear_face_analyser()
+	clear_frame_processors_modules()
+	facefusion.globals.execution_providers = decode_execution_providers(execution_providers)
+	return gradio.update(value = execution_providers)
+
+
+def update_execution_thread_count(execution_thread_count : int = 1) -> Update:
+	facefusion.globals.execution_thread_count = execution_thread_count
+	return gradio.update(value = execution_thread_count)
+
+
+def update_execution_queue_count(execution_queue_count : int = 1) -> Update:
+	facefusion.globals.execution_queue_count = execution_queue_count
+	return gradio.update(value = execution_queue_count)
diff --git a/DeepFakeAI/uis/components/face_analyser.py b/DeepFakeAI/uis/components/face_analyser.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b96a313de06c8194ef064eeb42f90702732aa20
--- /dev/null
+++ b/DeepFakeAI/uis/components/face_analyser.py
@@ -0,0 +1,54 @@
+from typing import Optional
+
+import gradio
+
+import facefusion.choices
+import facefusion.globals
+from facefusion import wording
+from facefusion.uis import core as ui
+from facefusion.uis.typing import Update
+
+FACE_ANALYSER_DIRECTION_DROPDOWN : Optional[gradio.Dropdown] = None
+FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None
+FACE_ANALYSER_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None
+
+
+def render() -> None:
+	global FACE_ANALYSER_DIRECTION_DROPDOWN
+	global FACE_ANALYSER_AGE_DROPDOWN
+	global FACE_ANALYSER_GENDER_DROPDOWN
+
+	with gradio.Box():
+		with gradio.Row():
+			FACE_ANALYSER_DIRECTION_DROPDOWN = gradio.Dropdown(
+				label = wording.get('face_analyser_direction_dropdown_label'),
+				choices = facefusion.choices.face_analyser_direction,
+				value = facefusion.globals.face_analyser_direction
+			)
+			FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown(
+				label = wording.get('face_analyser_age_dropdown_label'),
+				choices = ['none'] + facefusion.choices.face_analyser_age,
+				value = facefusion.globals.face_analyser_age or 'none'
+			)
+			FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown(
+				label = wording.get('face_analyser_gender_dropdown_label'),
+				choices = ['none'] + facefusion.choices.face_analyser_gender,
+				value = facefusion.globals.face_analyser_gender or 'none'
+			)
+		ui.register_component('face_analyser_direction_dropdown', FACE_ANALYSER_DIRECTION_DROPDOWN)
+		ui.register_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN)
+		ui.register_component('face_analyser_gender_dropdown', FACE_ANALYSER_GENDER_DROPDOWN)
+
+
+def listen() -> None:
+	FACE_ANALYSER_DIRECTION_DROPDOWN.select(lambda value: update_dropdown('face_analyser_direction', value), inputs = FACE_ANALYSER_DIRECTION_DROPDOWN, outputs = FACE_ANALYSER_DIRECTION_DROPDOWN)
+	FACE_ANALYSER_AGE_DROPDOWN.select(lambda value: update_dropdown('face_analyser_age', value), inputs = FACE_ANALYSER_AGE_DROPDOWN, outputs = FACE_ANALYSER_AGE_DROPDOWN)
+	FACE_ANALYSER_GENDER_DROPDOWN.select(lambda value: update_dropdown('face_analyser_gender', value), inputs = FACE_ANALYSER_GENDER_DROPDOWN, outputs = FACE_ANALYSER_GENDER_DROPDOWN)
+
+
+def update_dropdown(name : str, value : str) -> Update:
+	if value == 'none':
+		setattr(facefusion.globals, name, None)
+	else:
+		setattr(facefusion.globals, name, value)
+	return gradio.update(value = value)
diff --git a/DeepFakeAI/uis/components/face_selector.py b/DeepFakeAI/uis/components/face_selector.py
new file mode 100644
index 0000000000000000000000000000000000000000..58dc44df8dda93790a566abafcb439fb718911b8
--- /dev/null
+++ b/DeepFakeAI/uis/components/face_selector.py
@@ -0,0 +1,133 @@
+from typing import List, Optional, Tuple, Any, Dict
+from time import sleep
+
+import cv2
+import gradio
+
+import facefusion.choices
+import facefusion.globals
+from facefusion import wording
+from facefusion.capturer import get_video_frame
+from facefusion.face_analyser import get_many_faces
+from facefusion.face_reference import clear_face_reference
+from facefusion.typing import Frame, FaceRecognition
+from facefusion.uis import core as ui
+from facefusion.uis.typing import ComponentName, Update
+from facefusion.utilities import is_image, is_video
+
+FACE_RECOGNITION_DROPDOWN : Optional[gradio.Dropdown] = None
+REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None
+REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global FACE_RECOGNITION_DROPDOWN
+	global REFERENCE_FACE_POSITION_GALLERY
+	global REFERENCE_FACE_DISTANCE_SLIDER
+
+	with gradio.Box():
+		reference_face_gallery_args: Dict[str, Any] = {
+			'label': wording.get('reference_face_gallery_label'),
+			'height': 120,
+			'object_fit': 'cover',
+			'columns': 10,
+			'allow_preview': False,
+			'visible': 'reference' in facefusion.globals.face_recognition
+		}
+		if is_image(facefusion.globals.target_path):
+			reference_frame = cv2.imread(facefusion.globals.target_path)
+			reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
+		if is_video(facefusion.globals.target_path):
+			reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+			reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
+		FACE_RECOGNITION_DROPDOWN = gradio.Dropdown(
+			label = wording.get('face_recognition_dropdown_label'),
+			choices = facefusion.choices.face_recognition,
+			value = facefusion.globals.face_recognition
+		)
+		REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args)
+		REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider(
+			label = wording.get('reference_face_distance_slider_label'),
+			value = facefusion.globals.reference_face_distance,
+			maximum = 3,
+			step = 0.05,
+			visible = 'reference' in facefusion.globals.face_recognition
+		)
+		ui.register_component('face_recognition_dropdown', FACE_RECOGNITION_DROPDOWN)
+		ui.register_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY)
+		ui.register_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER)
+
+
+def listen() -> None:
+	FACE_RECOGNITION_DROPDOWN.select(update_face_recognition, inputs = FACE_RECOGNITION_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ])
+	REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_face_reference_position)
+	REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER)
+	update_component_names : List[ComponentName] =\
+	[
+		'target_file',
+		'preview_frame_slider'
+	]
+	for component_name in update_component_names:
+		component = ui.get_component(component_name)
+		if component:
+			component.change(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY)
+	select_component_names : List[ComponentName] =\
+	[
+		'face_analyser_direction_dropdown',
+		'face_analyser_age_dropdown',
+		'face_analyser_gender_dropdown'
+	]
+	for component_name in select_component_names:
+		component = ui.get_component(component_name)
+		if component:
+			component.select(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY)
+
+
+def update_face_recognition(face_recognition : FaceRecognition) -> Tuple[Update, Update]:
+	if face_recognition == 'reference':
+		facefusion.globals.face_recognition = face_recognition
+		return gradio.update(visible = True), gradio.update(visible = True)
+	if face_recognition == 'many':
+		facefusion.globals.face_recognition = face_recognition
+		return gradio.update(visible = False), gradio.update(visible = False)
+
+
+def clear_and_update_face_reference_position(event: gradio.SelectData) -> Update:
+	clear_face_reference()
+	return update_face_reference_position(event.index)
+
+
+def update_face_reference_position(reference_face_position : int = 0) -> Update:
+	sleep(0.2)
+	gallery_frames = []
+	facefusion.globals.reference_face_position = reference_face_position
+	if is_image(facefusion.globals.target_path):
+		reference_frame = cv2.imread(facefusion.globals.target_path)
+		gallery_frames = extract_gallery_frames(reference_frame)
+	if is_video(facefusion.globals.target_path):
+		reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+		gallery_frames = extract_gallery_frames(reference_frame)
+	if gallery_frames:
+		return gradio.update(value = gallery_frames)
+	return gradio.update(value = None)
+
+
+def update_reference_face_distance(reference_face_distance : float) -> Update:
+	facefusion.globals.reference_face_distance = reference_face_distance
+	return gradio.update(value = reference_face_distance)
+
+
+def extract_gallery_frames(reference_frame : Frame) -> List[Frame]:
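+	# crop every detected face with 25 percent padding around its bounding box and convert it for display in the gallery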
+	crop_frames = []
+	faces = get_many_faces(reference_frame)
+	for face in faces:
+		start_x, start_y, end_x, end_y = map(int, face['bbox'])
+		padding_x = int((end_x - start_x) * 0.25)
+		padding_y = int((end_y - start_y) * 0.25)
+		start_x = max(0, start_x - padding_x)
+		start_y = max(0, start_y - padding_y)
+		end_x = max(0, end_x + padding_x)
+		end_y = max(0, end_y + padding_y)
+		crop_frame = reference_frame[start_y:end_y, start_x:end_x]
+		crop_frames.append(ui.normalize_frame(crop_frame))
+	return crop_frames
diff --git a/DeepFakeAI/uis/components/output.py b/DeepFakeAI/uis/components/output.py
new file mode 100644
index 0000000000000000000000000000000000000000..78655f5ee36bdf2f1114b1afbc1563abaaea8d1e
--- /dev/null
+++ b/DeepFakeAI/uis/components/output.py
@@ -0,0 +1,55 @@
+from typing import Tuple, Optional
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.core import conditional_process
+from facefusion.uis.typing import Update
+from facefusion.utilities import is_image, is_video, normalize_output_path, clear_temp
+
+OUTPUT_START_BUTTON : Optional[gradio.Button] = None
+OUTPUT_CLEAR_BUTTON : Optional[gradio.Button] = None
+OUTPUT_IMAGE : Optional[gradio.Image] = None
+OUTPUT_VIDEO : Optional[gradio.Video] = None
+
+
+def render() -> None:
+	global OUTPUT_START_BUTTON
+	global OUTPUT_CLEAR_BUTTON
+	global OUTPUT_IMAGE
+	global OUTPUT_VIDEO
+
+	with gradio.Row():
+		with gradio.Box():
+			OUTPUT_IMAGE = gradio.Image(
+				label = wording.get('output_image_or_video_label'),
+				visible = False
+			)
+			OUTPUT_VIDEO = gradio.Video(
+				label = wording.get('output_image_or_video_label')
+			)
+	with gradio.Row():
+		OUTPUT_START_BUTTON = gradio.Button(wording.get('start_button_label'))
+		OUTPUT_CLEAR_BUTTON = gradio.Button(wording.get('clear_button_label'))
+
+
+def listen() -> None:
+	OUTPUT_START_BUTTON.click(update, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ])
+	OUTPUT_CLEAR_BUTTON.click(clear, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ])
+
+
+def update() -> Tuple[Update, Update]:
+	facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, '.')
+	if facefusion.globals.output_path:
+		conditional_process()
+		if is_image(facefusion.globals.output_path):
+			return gradio.update(value = facefusion.globals.output_path, visible = True), gradio.update(value = None, visible = False)
+		if is_video(facefusion.globals.output_path):
+			return gradio.update(value = None, visible = False), gradio.update(value = facefusion.globals.output_path, visible = True)
+	return gradio.update(value = None, visible = False), gradio.update(value = None, visible = False)
+
+
+def clear() -> Tuple[Update, Update]:
+	if facefusion.globals.target_path:
+		clear_temp(facefusion.globals.target_path)
+	return gradio.update(value = None), gradio.update(value = None)
diff --git a/DeepFakeAI/uis/components/output_settings.py b/DeepFakeAI/uis/components/output_settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..257abbb121b75b67f0f9f225fd29ec6d7c832ba5
--- /dev/null
+++ b/DeepFakeAI/uis/components/output_settings.py
@@ -0,0 +1,43 @@
+from typing import Optional
+import gradio
+
+import facefusion.choices
+import facefusion.globals
+from facefusion import wording
+from facefusion.typing import OutputVideoEncoder
+from facefusion.uis.typing import Update
+
+OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None
+OUTPUT_VIDEO_QUALITY_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global OUTPUT_VIDEO_ENCODER_DROPDOWN
+	global OUTPUT_VIDEO_QUALITY_SLIDER
+
+	with gradio.Box():
+		OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown(
+			label = wording.get('output_video_encoder_dropdown_label'),
+			choices = facefusion.choices.output_video_encoder,
+			value = facefusion.globals.output_video_encoder
+		)
+		OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider(
+			label = wording.get('output_video_quality_slider_label'),
+			value = facefusion.globals.output_video_quality,
+			step = 1
+		)
+
+
+def listen() -> None:
+	OUTPUT_VIDEO_ENCODER_DROPDOWN.select(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN, outputs = OUTPUT_VIDEO_ENCODER_DROPDOWN)
+	OUTPUT_VIDEO_QUALITY_SLIDER.change(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER, outputs = OUTPUT_VIDEO_QUALITY_SLIDER)
+
+
+def update_output_video_encoder(output_video_encoder: OutputVideoEncoder) -> Update:
+	facefusion.globals.output_video_encoder = output_video_encoder
+	return gradio.update(value = output_video_encoder)
+
+
+def update_output_video_quality(output_video_quality : int) -> Update:
+	facefusion.globals.output_video_quality = output_video_quality
+	return gradio.update(value = output_video_quality)
diff --git a/DeepFakeAI/uis/components/preview.py b/DeepFakeAI/uis/components/preview.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c0f013d8486107ad6c07008fdec218728797e62
--- /dev/null
+++ b/DeepFakeAI/uis/components/preview.py
@@ -0,0 +1,121 @@
+from time import sleep
+from typing import Any, Dict, Tuple, List, Optional
+import cv2
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.capturer import get_video_frame, get_video_frame_total
+from facefusion.face_analyser import get_one_face
+from facefusion.face_reference import get_face_reference, set_face_reference
+from facefusion.predictor import predict_frame
+from facefusion.processors.frame.core import load_frame_processor_module
+from facefusion.typing import Frame
+from facefusion.uis import core as ui
+from facefusion.uis.typing import ComponentName, Update
+from facefusion.utilities import is_video, is_image
+
+PREVIEW_IMAGE : Optional[gradio.Image] = None
+PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global PREVIEW_IMAGE
+	global PREVIEW_FRAME_SLIDER
+
+	with gradio.Box():
+		preview_image_args: Dict[str, Any] = {
+			'label': wording.get('preview_image_label')
+		}
+		preview_frame_slider_args: Dict[str, Any] = {
+			'label': wording.get('preview_frame_slider_label'),
+			'step': 1,
+			'visible': False
+		}
+		if is_image(facefusion.globals.target_path):
+			target_frame = cv2.imread(facefusion.globals.target_path)
+			preview_frame = extract_preview_frame(target_frame)
+			preview_image_args['value'] = ui.normalize_frame(preview_frame)
+		if is_video(facefusion.globals.target_path):
+			temp_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+			preview_frame = extract_preview_frame(temp_frame)
+			preview_image_args['value'] = ui.normalize_frame(preview_frame)
+			preview_image_args['visible'] = True
+			preview_frame_slider_args['value'] = facefusion.globals.reference_frame_number
+			preview_frame_slider_args['maximum'] = get_video_frame_total(facefusion.globals.target_path)
+			preview_frame_slider_args['visible'] = True
+		PREVIEW_IMAGE = gradio.Image(**preview_image_args)
+		PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args)
+		ui.register_component('preview_frame_slider', PREVIEW_FRAME_SLIDER)
+
+
+def listen() -> None:
+	PREVIEW_FRAME_SLIDER.change(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ])
+	update_component_names : List[ComponentName] =\
+	[
+		'source_file',
+		'target_file',
+		'face_recognition_dropdown',
+		'reference_face_distance_slider',
+		'frame_processors_checkbox_group'
+	]
+	for component_name in update_component_names:
+		component = ui.get_component(component_name)
+		if component:
+			component.change(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ])
+	select_component_names : List[ComponentName] =\
+	[
+		'reference_face_position_gallery',
+		'face_analyser_direction_dropdown',
+		'face_analyser_age_dropdown',
+		'face_analyser_gender_dropdown'
+	]
+	for component_name in select_component_names:
+		component = ui.get_component(component_name)
+		if component:
+			component.select(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ])
+
+
+def update(frame_number : int = 0) -> Tuple[Update, Update]:
+	sleep(0.1)
+	if is_image(facefusion.globals.target_path):
+		target_frame = cv2.imread(facefusion.globals.target_path)
+		preview_frame = extract_preview_frame(target_frame)
+		return gradio.update(value = ui.normalize_frame(preview_frame)), gradio.update(value = None, maximum = None, visible = False)
+	if is_video(facefusion.globals.target_path):
+		facefusion.globals.reference_frame_number = frame_number
+		video_frame_total = get_video_frame_total(facefusion.globals.target_path)
+		temp_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+		preview_frame = extract_preview_frame(temp_frame)
+		return gradio.update(value = ui.normalize_frame(preview_frame)), gradio.update(maximum = video_frame_total, visible = True)
+	return gradio.update(value = None), gradio.update(value = None, maximum = None, visible = False)
+
+
+def extract_preview_frame(temp_frame : Frame) -> Frame:
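+	# return a fully blurred frame when predict_frame() flags it, otherwise run every selected frame processor on a downscaled copy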
+	if predict_frame(temp_frame):
+		return cv2.GaussianBlur(temp_frame, (99, 99), 0)
+	source_face = get_one_face(cv2.imread(facefusion.globals.source_path)) if facefusion.globals.source_path else None
+	temp_frame = reduce_preview_frame(temp_frame)
+	if 'reference' in facefusion.globals.face_recognition and not get_face_reference():
+		reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+		reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
+		set_face_reference(reference_face)
+	reference_face = get_face_reference() if 'reference' in facefusion.globals.face_recognition else None
+	for frame_processor in facefusion.globals.frame_processors:
+		frame_processor_module = load_frame_processor_module(frame_processor)
+		if frame_processor_module.pre_process():
+			temp_frame = frame_processor_module.process_frame(
+				source_face,
+				reference_face,
+				temp_frame
+			)
+	return temp_frame
+
+
+def reduce_preview_frame(temp_frame : Frame, max_height : int = 480) -> Frame:
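+	# downscale frames taller than max_height while keeping the aspect ratio, so the preview stays responsive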
+	height, width = temp_frame.shape[:2]
+	if height > max_height:
+		scale = max_height / height
+		max_width = int(width * scale)
+		temp_frame = cv2.resize(temp_frame, (max_width, max_height))
+	return temp_frame
diff --git a/DeepFakeAI/uis/components/processors.py b/DeepFakeAI/uis/components/processors.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f97b5743e5a87179c2bc1a0fc83bdd6f7c06466
--- /dev/null
+++ b/DeepFakeAI/uis/components/processors.py
@@ -0,0 +1,41 @@
+from typing import List, Optional
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules
+from facefusion.uis import core as ui
+from facefusion.uis.typing import Update
+from facefusion.utilities import list_module_names
+
+FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
+def render() -> None:
+	global FRAME_PROCESSORS_CHECKBOX_GROUP
+
+	with gradio.Box():
+		FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+			label = wording.get('frame_processors_checkbox_group_label'),
+			choices = sort_frame_processors(facefusion.globals.frame_processors),
+			value = facefusion.globals.frame_processors
+		)
+		ui.register_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP)
+
+
+def listen() -> None:
+	FRAME_PROCESSORS_CHECKBOX_GROUP.change(update_frame_processors, inputs = FRAME_PROCESSORS_CHECKBOX_GROUP, outputs = FRAME_PROCESSORS_CHECKBOX_GROUP)
+
+
+def update_frame_processors(frame_processors : List[str]) -> Update:
+	clear_frame_processors_modules()
+	facefusion.globals.frame_processors = frame_processors
+	for frame_processor in facefusion.globals.frame_processors:
+		frame_processor_module = load_frame_processor_module(frame_processor)
+		frame_processor_module.pre_check()
+	return gradio.update(value = frame_processors, choices = sort_frame_processors(frame_processors))
+
+
+def sort_frame_processors(frame_processors : List[str]) -> List[str]:
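+	# keep the selected processors in their chosen order and append the remaining available modules at the end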
+	frame_processors_names = list_module_names('facefusion/processors/frame/modules')
+	return sorted(frame_processors_names, key = lambda frame_processor : frame_processors.index(frame_processor) if frame_processor in frame_processors else len(frame_processors))
diff --git a/DeepFakeAI/uis/components/settings.py b/DeepFakeAI/uis/components/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f0efe087c111b505e68415ed50314c01ffce899
--- /dev/null
+++ b/DeepFakeAI/uis/components/settings.py
@@ -0,0 +1,41 @@
+from typing import Optional
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.uis.typing import Update
+
+KEEP_FPS_CHECKBOX : Optional[gradio.Checkbox] = None
+KEEP_TEMP_CHECKBOX : Optional[gradio.Checkbox] = None
+SKIP_AUDIO_CHECKBOX : Optional[gradio.Checkbox] = None
+
+
+def render() -> None:
+	global KEEP_FPS_CHECKBOX
+	global KEEP_TEMP_CHECKBOX
+	global SKIP_AUDIO_CHECKBOX
+
+	with gradio.Box():
+		KEEP_FPS_CHECKBOX = gradio.Checkbox(
+			label = wording.get('keep_fps_checkbox_label'),
+			value = facefusion.globals.keep_fps
+		)
+		KEEP_TEMP_CHECKBOX = gradio.Checkbox(
+			label = wording.get('keep_temp_checkbox_label'),
+			value = facefusion.globals.keep_temp
+		)
+		SKIP_AUDIO_CHECKBOX = gradio.Checkbox(
+			label = wording.get('skip_audio_checkbox_label'),
+			value = facefusion.globals.skip_audio
+		)
+
+
+def listen() -> None:
+	KEEP_FPS_CHECKBOX.change(lambda value: update_checkbox('keep_fps', value), inputs = KEEP_FPS_CHECKBOX, outputs = KEEP_FPS_CHECKBOX)
+	KEEP_TEMP_CHECKBOX.change(lambda value: update_checkbox('keep_temp', value), inputs = KEEP_TEMP_CHECKBOX, outputs = KEEP_TEMP_CHECKBOX)
+	SKIP_AUDIO_CHECKBOX.change(lambda value: update_checkbox('skip_audio', value), inputs = SKIP_AUDIO_CHECKBOX, outputs = SKIP_AUDIO_CHECKBOX)
+
+
+def update_checkbox(name : str, value: bool) -> Update:
+	setattr(facefusion.globals, name, value)
+	return gradio.update(value = value)
diff --git a/DeepFakeAI/uis/components/source.py b/DeepFakeAI/uis/components/source.py
new file mode 100644
index 0000000000000000000000000000000000000000..75b98fe70eacda9f76cfce713e7c6a0e5579eb04
--- /dev/null
+++ b/DeepFakeAI/uis/components/source.py
@@ -0,0 +1,48 @@
+from typing import Any, IO, Optional
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.uis import core as ui
+from facefusion.uis.typing import Update
+from facefusion.utilities import is_image
+
+SOURCE_FILE : Optional[gradio.File] = None
+SOURCE_IMAGE : Optional[gradio.Image] = None
+
+
+def render() -> None:
+	global SOURCE_FILE
+	global SOURCE_IMAGE
+
+	with gradio.Box():
+		is_source_image = is_image(facefusion.globals.source_path)
+		SOURCE_FILE = gradio.File(
+			file_count = 'single',
+			file_types=
+			[
+				'.png',
+				'.jpg',
+				'.webp'
+			],
+			label = wording.get('source_file_label'),
+			value = facefusion.globals.source_path if is_source_image else None
+		)
+		ui.register_component('source_file', SOURCE_FILE)
+		SOURCE_IMAGE = gradio.Image(
+			value = SOURCE_FILE.value['name'] if is_source_image else None,
+			visible = is_source_image,
+			show_label = False
+		)
+
+
+def listen() -> None:
+	SOURCE_FILE.change(update, inputs = SOURCE_FILE, outputs = SOURCE_IMAGE)
+
+
+def update(file: IO[Any]) -> Update:
+	if file and is_image(file.name):
+		facefusion.globals.source_path = file.name
+		return gradio.update(value = file.name, visible = True)
+	facefusion.globals.source_path = None
+	return gradio.update(value = None, visible = False)
diff --git a/DeepFakeAI/uis/components/target.py b/DeepFakeAI/uis/components/target.py
new file mode 100644
index 0000000000000000000000000000000000000000..309d7f9a44c385a521f4cfcb347aaa544c651647
--- /dev/null
+++ b/DeepFakeAI/uis/components/target.py
@@ -0,0 +1,62 @@
+from typing import Any, IO, Tuple, Optional
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.face_reference import clear_face_reference
+from facefusion.uis import core as ui
+from facefusion.uis.typing import Update
+from facefusion.utilities import is_image, is_video
+
+TARGET_FILE : Optional[gradio.File] = None
+TARGET_IMAGE : Optional[gradio.Image] = None
+TARGET_VIDEO : Optional[gradio.Video] = None
+
+
+def render() -> None:
+	global TARGET_FILE
+	global TARGET_IMAGE
+	global TARGET_VIDEO
+
+	with gradio.Box():
+		is_target_image = is_image(facefusion.globals.target_path)
+		is_target_video = is_video(facefusion.globals.target_path)
+		TARGET_FILE = gradio.File(
+			label = wording.get('target_file_label'),
+			file_count = 'single',
+			file_types =
+			[
+				'.png',
+				'.jpg',
+				'.webp',
+				'.mp4'
+			],
+			value = facefusion.globals.target_path if is_target_image or is_target_video else None
+		)
+		TARGET_IMAGE = gradio.Image(
+			value = TARGET_FILE.value['name'] if is_target_image else None,
+			visible = is_target_image,
+			show_label = False
+		)
+		TARGET_VIDEO = gradio.Video(
+			value = TARGET_FILE.value['name'] if is_target_video else None,
+			visible = is_target_video,
+			show_label = False
+		)
+		ui.register_component('target_file', TARGET_FILE)
+
+
+def listen() -> None:
+	TARGET_FILE.change(update, inputs = TARGET_FILE, outputs = [ TARGET_IMAGE, TARGET_VIDEO ])
+
+
+def update(file : IO[Any]) -> Tuple[Update, Update]:
+	clear_face_reference()
+	if file and is_image(file.name):
+		facefusion.globals.target_path = file.name
+		return gradio.update(value = file.name, visible = True), gradio.update(value = None, visible = False)
+	if file and is_video(file.name):
+		facefusion.globals.target_path = file.name
+		return gradio.update(value = None, visible = False), gradio.update(value = file.name, visible = True)
+	facefusion.globals.target_path = None
+	return gradio.update(value = None, visible = False), gradio.update(value = None, visible = False)
diff --git a/DeepFakeAI/uis/components/temp_frame.py b/DeepFakeAI/uis/components/temp_frame.py
new file mode 100644
index 0000000000000000000000000000000000000000..9584ec55d5606ddd99e1ca1f1ad21aa33bc4e02c
--- /dev/null
+++ b/DeepFakeAI/uis/components/temp_frame.py
@@ -0,0 +1,44 @@
+from typing import Optional
+import gradio
+
+import facefusion.choices
+import facefusion.globals
+from facefusion import wording
+from facefusion.typing import TempFrameFormat
+
+from facefusion.uis.typing import Update
+
+TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None
+TEMP_FRAME_QUALITY_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global TEMP_FRAME_FORMAT_DROPDOWN
+	global TEMP_FRAME_QUALITY_SLIDER
+
+	with gradio.Box():
+		TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown(
+			label = wording.get('temp_frame_format_dropdown_label'),
+			choices = facefusion.choices.temp_frame_format,
+			value = facefusion.globals.temp_frame_format
+		)
+		TEMP_FRAME_QUALITY_SLIDER = gradio.Slider(
+			label = wording.get('temp_frame_quality_slider_label'),
+			value = facefusion.globals.temp_frame_quality,
+			step = 1
+		)
+
+
+def listen() -> None:
+	TEMP_FRAME_FORMAT_DROPDOWN.select(update_temp_frame_format, inputs = TEMP_FRAME_FORMAT_DROPDOWN, outputs = TEMP_FRAME_FORMAT_DROPDOWN)
+	TEMP_FRAME_QUALITY_SLIDER.change(update_temp_frame_quality, inputs = TEMP_FRAME_QUALITY_SLIDER, outputs = TEMP_FRAME_QUALITY_SLIDER)
+
+
+def update_temp_frame_format(temp_frame_format : TempFrameFormat) -> Update:
+	facefusion.globals.temp_frame_format = temp_frame_format
+	return gradio.update(value = temp_frame_format)
+
+
+def update_temp_frame_quality(temp_frame_quality : int) -> Update:
+	facefusion.globals.temp_frame_quality = temp_frame_quality
+	return gradio.update(value = temp_frame_quality)
diff --git a/DeepFakeAI/uis/components/trim_frame.py b/DeepFakeAI/uis/components/trim_frame.py
new file mode 100644
index 0000000000000000000000000000000000000000..6af4e2255fdabbedc7d92fed5f1982f81488e096
--- /dev/null
+++ b/DeepFakeAI/uis/components/trim_frame.py
@@ -0,0 +1,65 @@
+from time import sleep
+from typing import Any, Dict, Tuple, Optional
+
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.capturer import get_video_frame_total
+from facefusion.uis import core as ui
+from facefusion.uis.typing import Update
+from facefusion.utilities import is_video
+
+TRIM_FRAME_START_SLIDER : Optional[gradio.Slider] = None
+TRIM_FRAME_END_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global TRIM_FRAME_START_SLIDER
+	global TRIM_FRAME_END_SLIDER
+
+	with gradio.Box():
+		trim_frame_start_slider_args : Dict[str, Any] = {
+			'label': wording.get('trim_frame_start_slider_label'),
+			'step': 1,
+			'visible': False
+		}
+		trim_frame_end_slider_args : Dict[str, Any] = {
+			'label': wording.get('trim_frame_end_slider_label'),
+			'step': 1,
+			'visible': False
+		}
+		if is_video(facefusion.globals.target_path):
+			video_frame_total = get_video_frame_total(facefusion.globals.target_path)
+			trim_frame_start_slider_args['value'] = facefusion.globals.trim_frame_start or 0
+			trim_frame_start_slider_args['maximum'] = video_frame_total
+			trim_frame_start_slider_args['visible'] = True
+			trim_frame_end_slider_args['value'] = facefusion.globals.trim_frame_end or video_frame_total
+			trim_frame_end_slider_args['maximum'] = video_frame_total
+			trim_frame_end_slider_args['visible'] = True
+		with gradio.Row():
+			TRIM_FRAME_START_SLIDER = gradio.Slider(**trim_frame_start_slider_args)
+			TRIM_FRAME_END_SLIDER = gradio.Slider(**trim_frame_end_slider_args)
+
+
+def listen() -> None:
+	target_file = ui.get_component('target_file')
+	if target_file:
+		target_file.change(remote_update, outputs = [ TRIM_FRAME_START_SLIDER, TRIM_FRAME_END_SLIDER ])
+	TRIM_FRAME_START_SLIDER.change(lambda value : update_number('trim_frame_start', int(value)), inputs = TRIM_FRAME_START_SLIDER, outputs = TRIM_FRAME_START_SLIDER)
+	TRIM_FRAME_END_SLIDER.change(lambda value : update_number('trim_frame_end', int(value)), inputs = TRIM_FRAME_END_SLIDER, outputs = TRIM_FRAME_END_SLIDER)
+
+
+def remote_update() -> Tuple[Update, Update]:
+	sleep(0.1)
+	if is_video(facefusion.globals.target_path):
+		video_frame_total = get_video_frame_total(facefusion.globals.target_path)
+		facefusion.globals.trim_frame_start = 0
+		facefusion.globals.trim_frame_end = video_frame_total
+		return gradio.update(value = 0, maximum = video_frame_total, visible = True), gradio.update(value = video_frame_total, maximum = video_frame_total, visible = True)
+	return gradio.update(value = None, maximum = None, visible = False), gradio.update(value = None, maximum = None, visible = False)
+
+
+def update_number(name : str, value : int) -> Update:
+	setattr(facefusion.globals, name, value)
+	return gradio.update(value = value)
diff --git a/DeepFakeAI/uis/core.py b/DeepFakeAI/uis/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d4c351b96200135b41b1394841929f987d93a61
--- /dev/null
+++ b/DeepFakeAI/uis/core.py
@@ -0,0 +1,67 @@
+from typing import Dict, Optional, Any
+import importlib
+import sys
+import cv2
+import gradio
+
+import facefusion.globals
+from facefusion import metadata, wording
+from facefusion.typing import Frame
+from facefusion.uis.typing import Component, ComponentName
+
+COMPONENTS: Dict[ComponentName, Component] = {}
+UI_LAYOUT_METHODS =\
+[
+	'pre_check',
+	'render',
+	'listen'
+]
+
+
+def launch() -> None:
+	with gradio.Blocks(theme = get_theme(), title = metadata.get('name') + ' ' + metadata.get('version')) as ui:
+		for ui_layout in facefusion.globals.ui_layouts:
+			ui_layout_module = load_ui_layout_module(ui_layout)
+			ui_layout_module.pre_check()
+			ui_layout_module.render()
+			ui_layout_module.listen()
+	ui.launch(debug = True, share = True)
+
+
+def load_ui_layout_module(ui_layout : str) -> Any:
+	try:
+		ui_layout_module = importlib.import_module('facefusion.uis.layouts.' + ui_layout)
+		for method_name in UI_LAYOUT_METHODS:
+			if not hasattr(ui_layout_module, method_name):
+				raise NotImplementedError
+	except ModuleNotFoundError:
+		sys.exit(wording.get('ui_layout_not_loaded').format(ui_layout = ui_layout))
+	except NotImplementedError:
+		sys.exit(wording.get('ui_layout_not_implemented').format(ui_layout = ui_layout))
+	return ui_layout_module
+
+
+def get_theme() -> gradio.Theme:
+	return gradio.themes.Soft(
+		primary_hue = gradio.themes.colors.red,
+		secondary_hue = gradio.themes.colors.gray,
+		font = gradio.themes.GoogleFont('Inter')
+	).set(
+		background_fill_primary = '*neutral_50',
+		block_label_text_size = '*text_sm',
+		block_title_text_size = '*text_sm'
+	)
+
+
+def get_component(name: ComponentName) -> Optional[Component]:
+	if name in COMPONENTS:
+		return COMPONENTS[name]
+	return None
+
+
+def register_component(name: ComponentName, component: Component) -> None:
+	COMPONENTS[name] = component
+
+
+def normalize_frame(frame : Frame) -> Frame:
+	return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
diff --git a/DeepFakeAI/uis/layouts/__pycache__/default.cpython-310.pyc b/DeepFakeAI/uis/layouts/__pycache__/default.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd06555a33a4c193a68ccc505433e7f3685c93cd
Binary files /dev/null and b/DeepFakeAI/uis/layouts/__pycache__/default.cpython-310.pyc differ
diff --git a/DeepFakeAI/uis/layouts/benchmark.py b/DeepFakeAI/uis/layouts/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7ddf370d2632c93f30ae2dd4429e71ad36faf68
--- /dev/null
+++ b/DeepFakeAI/uis/layouts/benchmark.py
@@ -0,0 +1,37 @@
+import gradio
+
+from facefusion.uis.components import about, processors, execution, benchmark
+from facefusion.utilities import conditional_download
+
+
+def pre_check() -> bool:
+	conditional_download('.assets/examples',
+	[
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg',
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4',
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-360p.mp4',
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-540p.mp4',
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-720p.mp4',
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1080p.mp4',
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1440p.mp4',
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-2160p.mp4'
+	])
+	return True
+
+
+def render() -> gradio.Blocks:
+	with gradio.Blocks() as layout:
+		with gradio.Row():
+			with gradio.Column(scale = 2):
+				about.render()
+				processors.render()
+				execution.render()
+			with gradio.Column(scale = 5):
+				benchmark.render()
+	return layout
+
+
+def listen() -> None:
+	processors.listen()
+	execution.listen()
+	benchmark.listen()
diff --git a/DeepFakeAI/uis/layouts/default.py b/DeepFakeAI/uis/layouts/default.py
new file mode 100644
index 0000000000000000000000000000000000000000..0eb2baa8311c042a23ff2e8fbdf160ad658e745c
--- /dev/null
+++ b/DeepFakeAI/uis/layouts/default.py
@@ -0,0 +1,44 @@
+import gradio
+
+from facefusion.uis.components import about, processors, execution, temp_frame, settings, source, target, preview, trim_frame, face_analyser, face_selector, output_settings, output
+
+
+def pre_check() -> bool:
+	return True
+
+
+def render() -> gradio.Blocks:
+	with gradio.Blocks() as layout:
+		with gradio.Row():
+			with gradio.Column(scale = 2):
+				about.render()
+				processors.render()
+				execution.render()
+				temp_frame.render()
+				settings.render()
+			with gradio.Column(scale = 2):
+				source.render()
+				target.render()
+				output_settings.render()
+				output.render()
+			with gradio.Column(scale = 3):
+				preview.render()
+				trim_frame.render()
+				face_selector.render()
+				face_analyser.render()
+	return layout
+
+
+def listen() -> None:
+	processors.listen()
+	execution.listen()
+	settings.listen()
+	temp_frame.listen()
+	source.listen()
+	target.listen()
+	preview.listen()
+	trim_frame.listen()
+	face_selector.listen()
+	face_analyser.listen()
+	output_settings.listen()
+	output.listen()
diff --git a/DeepFakeAI/uis/typing.py b/DeepFakeAI/uis/typing.py
new file mode 100644
index 0000000000000000000000000000000000000000..4abe384f07c4b90504e47291674905f85a5b8f52
--- /dev/null
+++ b/DeepFakeAI/uis/typing.py
@@ -0,0 +1,18 @@
+from typing import Literal, Dict, Any, Union
+import gradio
+
+Component = Union[gradio.File, gradio.Image, gradio.Video, gradio.Slider]
+ComponentName = Literal\
+[
+	'source_file',
+	'target_file',
+	'preview_frame_slider',
+	'face_recognition_dropdown',
+	'reference_face_position_gallery',
+	'reference_face_distance_slider',
+	'face_analyser_direction_dropdown',
+	'face_analyser_age_dropdown',
+	'face_analyser_gender_dropdown',
+	'frame_processors_checkbox_group'
+]
+Update = Dict[Any, Any]
diff --git a/DeepFakeAI/utilities.py b/DeepFakeAI/utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..97d55481d2fde0e4c11a27e981111f03f2409572
--- /dev/null
+++ b/DeepFakeAI/utilities.py
@@ -0,0 +1,190 @@
+import glob
+import mimetypes
+import os
+import platform
+import shutil
+import ssl
+import subprocess
+import tempfile
+import urllib.request
+from pathlib import Path
+from typing import List, Optional
+
+import onnxruntime
+from tqdm import tqdm
+
+import facefusion.globals
+from facefusion import wording
+
+TEMP_DIRECTORY_PATH = os.path.join(tempfile.gettempdir(), 'facefusion')
+TEMP_OUTPUT_NAME = 'temp.mp4'
+
+# monkey patch ssl
+if platform.system().lower() == 'darwin':
+	ssl._create_default_https_context = ssl._create_unverified_context
+
+
+def run_ffmpeg(args : List[str]) -> bool:
+	commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ]
+	commands.extend(args)
+	try:
+		subprocess.check_output(commands, stderr = subprocess.STDOUT)
+		return True
+	except subprocess.CalledProcessError:
+		return False
+
+
+def detect_fps(target_path : str) -> Optional[float]:
+	commands = [ 'ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=r_frame_rate', '-of', 'default=noprint_wrappers=1:nokey=1', target_path ]
+	output = subprocess.check_output(commands).decode().strip().split('/')
+	try:
+		numerator, denominator = map(int, output)
+		return numerator / denominator
+	except (ValueError, ZeroDivisionError):
+		return None
+
+
+def extract_frames(target_path : str, fps : float) -> bool:
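+	# map the 0 - 100 temp frame quality onto ffmpeg's -q:v scale (lower is better) and honour any trim range via the trim filter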
+	temp_directory_path = get_temp_directory_path(target_path)
+	temp_frame_quality = round(31 - (facefusion.globals.temp_frame_quality * 0.31))
+	trim_frame_start = facefusion.globals.trim_frame_start
+	trim_frame_end = facefusion.globals.trim_frame_end
+	commands = [ '-hwaccel', 'auto', '-i', target_path, '-q:v', str(temp_frame_quality), '-pix_fmt', 'rgb24' ]
+	if trim_frame_start is not None and trim_frame_end is not None:
+		commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ':end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ])
+	elif trim_frame_start is not None:
+		commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ',fps=' + str(fps) ])
+	elif trim_frame_end is not None:
+		commands.extend([ '-vf', 'trim=end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ])
+	else:
+		commands.extend([ '-vf', 'fps=' + str(fps) ])
+	commands.extend([os.path.join(temp_directory_path, '%04d.' + facefusion.globals.temp_frame_format)])
+	return run_ffmpeg(commands)
+
+
+def create_video(target_path : str, fps : float) -> bool:
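+	# map the 0 - 100 output video quality onto the encoder's CRF / CQ scale where lower values mean better quality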
+	temp_output_path = get_temp_output_path(target_path)
+	temp_directory_path = get_temp_directory_path(target_path)
+	output_video_quality = round(51 - (facefusion.globals.output_video_quality * 0.5))
+	commands = [ '-hwaccel', 'auto', '-r', str(fps), '-i', os.path.join(temp_directory_path, '%04d.' + facefusion.globals.temp_frame_format), '-c:v', facefusion.globals.output_video_encoder ]
+	if facefusion.globals.output_video_encoder in [ 'libx264', 'libx265', 'libvpx' ]:
+		commands.extend([ '-crf', str(output_video_quality) ])
+	if facefusion.globals.output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]:
+		commands.extend([ '-cq', str(output_video_quality) ])
+	commands.extend([ '-pix_fmt', 'yuv420p', '-vf', 'colorspace=bt709:iall=bt601-6-625', '-y', temp_output_path ])
+	return run_ffmpeg(commands)
+
+
+def restore_audio(target_path : str, output_path : str) -> None:
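+	# copy the original audio when no trim range is set, otherwise convert the trim frames to seconds and re-encode the audio as aac
+	# fall back to moving the unmuxed temp video into place when ffmpeg fails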
+	fps = detect_fps(target_path)
+	trim_frame_start = facefusion.globals.trim_frame_start
+	trim_frame_end = facefusion.globals.trim_frame_end
+	temp_output_path = get_temp_output_path(target_path)
+	commands = [ '-hwaccel', 'auto', '-i', temp_output_path, '-i', target_path ]
+	if trim_frame_start is None and trim_frame_end is None:
+		commands.extend([ '-c:a', 'copy' ])
+	else:
+		if trim_frame_start is not None:
+			start_time = trim_frame_start / fps
+			commands.extend([ '-ss', str(start_time) ])
+		else:
+			commands.extend([ '-ss', '0' ])
+		if trim_frame_end is not None:
+			end_time = trim_frame_end / fps
+			commands.extend([ '-to', str(end_time) ])
+		commands.extend([ '-c:a', 'aac' ])
+	commands.extend([ '-map', '0:v:0', '-map', '1:a:0', '-y', output_path ])
+	done = run_ffmpeg(commands)
+	if not done:
+		move_temp(target_path, output_path)
+
+
+def get_temp_frame_paths(target_path : str) -> List[str]:
+	temp_directory_path = get_temp_directory_path(target_path)
+	return glob.glob((os.path.join(glob.escape(temp_directory_path), '*.' + facefusion.globals.temp_frame_format)))
+
+
+def get_temp_directory_path(target_path : str) -> str:
+	target_name, _ = os.path.splitext(os.path.basename(target_path))
+	return os.path.join(TEMP_DIRECTORY_PATH, target_name)
+
+
+def get_temp_output_path(target_path : str) -> str:
+	temp_directory_path = get_temp_directory_path(target_path)
+	return os.path.join(temp_directory_path, TEMP_OUTPUT_NAME)
+
+
+def normalize_output_path(source_path : str, target_path : str, output_path : str) -> Optional[str]:
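+	# when output_path points to a directory, derive the file name from the source and target names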
+	if source_path and target_path and output_path:
+		source_name, _ = os.path.splitext(os.path.basename(source_path))
+		target_name, target_extension = os.path.splitext(os.path.basename(target_path))
+		if os.path.isdir(output_path):
+			return os.path.join(output_path, source_name + '-' + target_name + target_extension)
+	return output_path
+
+
+def create_temp(target_path : str) -> None:
+	temp_directory_path = get_temp_directory_path(target_path)
+	Path(temp_directory_path).mkdir(parents = True, exist_ok = True)
+
+
+def move_temp(target_path : str, output_path : str) -> None:
+	temp_output_path = get_temp_output_path(target_path)
+	if os.path.isfile(temp_output_path):
+		if os.path.isfile(output_path):
+			os.remove(output_path)
+		shutil.move(temp_output_path, output_path)
+
+
+def clear_temp(target_path : str) -> None:
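+	# remove the temporary frames unless keep_temp is set and drop the parent directory once it is empty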
+	temp_directory_path = get_temp_directory_path(target_path)
+	parent_directory_path = os.path.dirname(temp_directory_path)
+	if not facefusion.globals.keep_temp and os.path.isdir(temp_directory_path):
+		shutil.rmtree(temp_directory_path)
+	if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path):
+		os.rmdir(parent_directory_path)
+
+
+def is_image(image_path : str) -> bool:
+	if image_path and os.path.isfile(image_path):
+		mimetype, _ = mimetypes.guess_type(image_path)
+		return bool(mimetype and mimetype.startswith('image/'))
+	return False
+
+
+def is_video(video_path : str) -> bool:
+	if video_path and os.path.isfile(video_path):
+		mimetype, _ = mimetypes.guess_type(video_path)
+		return bool(mimetype and mimetype.startswith('video/'))
+	return False
+
+
+def conditional_download(download_directory_path : str, urls : List[str]) -> None:
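+	# download each url only when the file is missing and report progress with tqdm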
+	if not os.path.exists(download_directory_path):
+		os.makedirs(download_directory_path)
+	for url in urls:
+		download_file_path = os.path.join(download_directory_path, os.path.basename(url))
+		if not os.path.exists(download_file_path):
+			request = urllib.request.urlopen(url) # type: ignore[attr-defined]
+			total = int(request.headers.get('Content-Length', 0))
+			with tqdm(total = total, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024) as progress:
+				urllib.request.urlretrieve(url, download_file_path, reporthook = lambda count, block_size, total_size: progress.update(block_size)) # type: ignore[attr-defined]
+
+
+def resolve_relative_path(path : str) -> str:
+	return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
+
+
+def list_module_names(path : str) -> Optional[List[str]]:
+	if os.path.exists(path):
+		files = os.listdir(path)
+		return [Path(file).stem for file in files if not Path(file).stem.startswith('__')]
+	return None
+
+
+def encode_execution_providers(execution_providers : List[str]) -> List[str]:
+	return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers]
+
+
+def decode_execution_providers(execution_providers : List[str]) -> List[str]:
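+	# map shortened names like 'cuda' back to the full onnxruntime provider names available on this machine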
+	return [provider for provider, encoded_execution_provider in zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers())) if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)]
diff --git a/DeepFakeAI/wording.py b/DeepFakeAI/wording.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d70363ea7546eeb3b3ec224eb04848db727718e
--- /dev/null
+++ b/DeepFakeAI/wording.py
@@ -0,0 +1,88 @@
+WORDING =\
+{
+	'python_not_supported': 'Python version is not supported, upgrade to {version} or higher',
+	'ffmpeg_not_installed': 'FFmpeg is not installed',
+	'source_help': 'select a source image',
+	'target_help': 'select a target image or video',
+	'output_help': 'specify the output file or directory',
+	'frame_processors_help': 'choose from the available frame processors (choices: {choices}, ...)',
+	'ui_layouts_help': 'choose from the available ui layouts (choices: {choices}, ...)',
+	'keep_fps_help': 'preserve the frames per second (fps) of the target',
+	'keep_temp_help': 'retain temporary frames after processing',
+	'skip_audio_help': 'omit audio from the target',
+	'face_recognition_help': 'specify the method for face recognition',
+	'face_analyser_direction_help': 'specify the direction used for face analysis',
+	'face_analyser_age_help': 'specify the age used for face analysis',
+	'face_analyser_gender_help': 'specify the gender used for face analysis',
+	'reference_face_position_help': 'specify the position of the reference face',
+	'reference_face_distance_help': 'specify the distance between the reference face and the target face',
+	'reference_frame_number_help': 'specify the number of the reference frame',
+	'trim_frame_start_help': 'specify the start frame for extraction',
+	'trim_frame_end_help': 'specify the end frame for extraction',
+	'temp_frame_format_help': 'specify the image format used for frame extraction',
+	'temp_frame_quality_help': 'specify the image quality used for frame extraction',
+	'output_video_encoder_help': 'specify the encoder used for the output video',
+	'output_video_quality_help': 'specify the quality used for the output video',
+	'max_memory_help': 'specify the maximum amount of ram to be used (in gb)',
+	'execution_providers_help': 'choose from the available execution providers (choices: {choices}, ...)',
+	'execution_thread_count_help': 'specify the number of execution threads',
+	'execution_queue_count_help': 'specify the number of execution queues',
+	'creating_temp': 'Creating temporary resources',
+	'extracting_frames_fps': 'Extracting frames with {fps} FPS',
+	'processing': 'Processing',
+	'downloading': 'Downloading',
+	'temp_frames_not_found': 'Temporary frames not found',
+	'creating_video_fps': 'Creating video with {fps} FPS',
+	'creating_video_failed': 'Creating video failed',
+	'skipping_audio': 'Skipping audio',
+	'restoring_audio': 'Restoring audio',
+	'clearing_temp': 'Clearing temporary resources',
+	'processing_image_succeed': 'Processing to image succeeded',
+	'processing_image_failed': 'Processing to image failed',
+	'processing_video_succeed': 'Processing to video succeeded',
+	'processing_video_failed': 'Processing to video failed',
+	'select_image_source': 'Select an image for source path',
+	'select_image_or_video_target': 'Select an image or video for target path',
+	'no_source_face_detected': 'No source face detected',
+	'frame_processor_not_loaded': 'Frame processor {frame_processor} could not be loaded',
+	'frame_processor_not_implemented': 'Frame processor {frame_processor} not implemented correctly',
+	'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded',
+	'ui_layout_not_implemented': 'UI layout {ui_layout} not implemented correctly',
+	'start_button_label': 'START',
+	'clear_button_label': 'CLEAR',
+	'benchmark_result_dataframe_label': 'BENCHMARK RESULT',
+	'benchmark_cycles_slider_label': 'BENCHMARK CYCLES',
+	'execution_providers_checkbox_group_label': 'EXECUTION PROVIDERS',
+	'execution_thread_count_slider_label': 'EXECUTION THREAD COUNT',
+	'execution_queue_count_slider_label': 'EXECUTION QUEUE COUNT',
+	'face_analyser_direction_dropdown_label': 'FACE ANALYSER DIRECTION',
+	'face_analyser_age_dropdown_label': 'FACE ANALYSER AGE',
+	'face_analyser_gender_dropdown_label': 'FACE ANALYSER GENDER',
+	'reference_face_gallery_label': 'REFERENCE FACE',
+	'face_recognition_dropdown_label': 'FACE RECOGNITION',
+	'reference_face_distance_slider_label': 'REFERENCE FACE DISTANCE',
+	'output_image_or_video_label': 'OUTPUT',
+	'output_video_encoder_dropdown_label': 'OUTPUT VIDEO ENCODER',
+	'output_video_quality_slider_label': 'OUTPUT VIDEO QUALITY',
+	'preview_image_label': 'PREVIEW',
+	'preview_frame_slider_label': 'PREVIEW FRAME',
+	'frame_processors_checkbox_group_label': 'FRAME PROCESSORS',
+	'keep_fps_checkbox_label': 'KEEP FPS',
+	'keep_temp_checkbox_label': 'KEEP TEMP',
+	'skip_audio_checkbox_label': 'SKIP AUDIO',
+	'temp_frame_format_dropdown_label': 'TEMP FRAME FORMAT',
+	'temp_frame_quality_slider_label': 'TEMP FRAME QUALITY',
+	'trim_frame_start_slider_label': 'TRIM FRAME START',
+	'trim_frame_end_slider_label': 'TRIM FRAME END',
+	'source_file_label': 'SOURCE',
+	'target_file_label': 'TARGET',
+	'point': '.',
+	'comma': ',',
+	'colon': ':',
+	'question_mark': '?',
+	'exclamation_mark': '!'
+}
+
+
+def get(key : str) -> str:
+	return WORDING[key]
diff --git a/FB_IMG_1690825211219-IMG_20230517_192234.jpg b/FB_IMG_1690825211219-IMG_20230517_192234.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a4df7d9947dd1c38f7047f83459028f073256444
Binary files /dev/null and b/FB_IMG_1690825211219-IMG_20230517_192234.jpg differ
diff --git a/gfpgan/weights/detection_Resnet50_Final.pth b/gfpgan/weights/detection_Resnet50_Final.pth
new file mode 100644
index 0000000000000000000000000000000000000000..16546738ce0a00a9fd47585e0fc52744d31cc117
--- /dev/null
+++ b/gfpgan/weights/detection_Resnet50_Final.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d1de9c2944f2ccddca5f5e010ea5ae64a39845a86311af6fdf30841b0a5a16d
+size 109497761
diff --git a/gfpgan/weights/parsing_parsenet.pth b/gfpgan/weights/parsing_parsenet.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ac2efc50360a79c9905dbac57d9d99cbfbe863c
--- /dev/null
+++ b/gfpgan/weights/parsing_parsenet.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d558d8d0e42c20224f13cf5a29c79eba2d59913419f945545d8cf7b72920de2
+size 85331193
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 0000000000000000000000000000000000000000..64218bc23688632a08c98ec4a0451ed46f8ed5e5
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,8 @@
+[mypy]
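+# strict-leaning configuration: untyped defs and calls are rejected, missing third-party stubs are ignored and Optional strictness stays off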
+check_untyped_defs = True
+disallow_any_generics = True
+disallow_untyped_calls = True
+disallow_untyped_defs = True
+ignore_missing_imports = True
+strict_optional = False
diff --git a/requirements-ci.txt b/requirements-ci.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f381ae5dc8bd37823ff98638ff252be9bbce8eec
--- /dev/null
+++ b/requirements-ci.txt
@@ -0,0 +1,12 @@
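+# lean, CPU-only dependency set, presumably what the CI test job installs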
+insightface==0.7.3
+numpy==1.24.3
+onnx==1.14.0
+onnxruntime==1.15.1
+opencv-python==4.8.0.74
+opennsfw2==0.10.2
+protobuf==4.23.4
+psutil==5.9.5
+pytest==7.4.0
+tensorflow==2.13.0
+tqdm==4.65.0
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8d430b2a9625cd07995f80d7e530a65d45152d62
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,21 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+
+gfpgan==1.3.8
+gradio==3.40.1
+insightface==0.7.3
+numpy==1.24.3
+onnx==1.14.0
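+# the markers below resolve to exactly one onnxruntime build per environment:
+# CPU or CoreML on Intel macOS (depending on the Python version), onnxruntime-silicon on Apple arm64, onnxruntime-gpu everywhere else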
+onnxruntime==1.15.1; python_version != '3.9' and sys_platform == 'darwin' and platform_machine != 'arm64'
+onnxruntime-coreml==1.13.1; python_version == '3.9' and sys_platform == 'darwin' and platform_machine != 'arm64'
+onnxruntime-gpu==1.15.1; sys_platform != 'darwin'
+onnxruntime-silicon==1.13.1; sys_platform == 'darwin' and platform_machine == 'arm64'
+opencv-python==4.8.0.74
+opennsfw2==0.10.2
+pillow==10.0.0
+protobuf==4.23.4
+psutil==5.9.5
+realesrgan==0.3.0
+tensorflow==2.13.0
+tqdm==4.65.0
diff --git a/run.py b/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..11500cdc86edf1a68cf1c53b78d4e7e01a6393c4
--- /dev/null
+++ b/run.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python3
+
+from facefusion import core
+
+if __name__ == '__main__':
+	core.run()
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/test_cli.py b/tests/test_cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a48a028ebf8a6a07abb4c5d834e849c051a599d
--- /dev/null
+++ b/tests/test_cli.py
@@ -0,0 +1,33 @@
+import subprocess
+import pytest
+
+from facefusion import wording
+from facefusion.utilities import conditional_download
+
+
+@pytest.fixture(scope = 'module', autouse = True)
+def before_all() -> None:
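+	# fetch the example assets once per module and grab a single frame from the video as an image target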
+	conditional_download('.assets/examples',
+	[
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg',
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1080p.mp4'
+	])
+	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-1080p.mp4', '-vframes', '1', '.assets/examples/target-1080p.jpg' ])
+
+
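+# each test drives run.py end to end and asserts on the exit code and the wording printed to stdout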
+def test_image_to_image() -> None:
+	commands = [ 'python', 'run.py', '-s', '.assets/examples/source.jpg', '-t', '.assets/examples/target-1080p.jpg', '-o', '.assets/examples' ]
+	run = subprocess.run(commands, stdout = subprocess.PIPE)
+
+	assert run.returncode == 0
+	assert wording.get('processing_image_succeed') in run.stdout.decode()
+
+
+def test_image_to_video() -> None:
+	commands = [ 'python', 'run.py', '-s', '.assets/examples/source.jpg', '-t', '.assets/examples/target-1080p.mp4', '-o', '.assets/examples', '--trim-frame-end', '10' ]
+	run = subprocess.run(commands, stdout = subprocess.PIPE)
+
+	assert run.returncode == 0
+	assert wording.get('processing_video_succeed') in run.stdout.decode()
diff --git a/tests/test_utilities.py b/tests/test_utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..4dcb8e25247f029bcf0e3c587be151f8165b33ae
--- /dev/null
+++ b/tests/test_utilities.py
@@ -0,0 +1,112 @@
+import glob
+import subprocess
+import pytest
+
+import facefusion.globals
+from facefusion.utilities import conditional_download, detect_fps, extract_frames, create_temp, get_temp_directory_path, clear_temp
+
+
+@pytest.fixture(scope = 'module', autouse = True)
+def before_all() -> None:
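+	# prime the globals and render the example video at three fixed frame rates so the fps assertions are deterministic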
+	facefusion.globals.temp_frame_quality = 100
+	facefusion.globals.trim_frame_start = None
+	facefusion.globals.trim_frame_end = None
+	facefusion.globals.temp_frame_format = 'png'
+	conditional_download('.assets/examples',
+	[
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4'
+	])
+	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=25', '.assets/examples/target-240p-25fps.mp4' ])
+	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=30', '.assets/examples/target-240p-30fps.mp4' ])
+	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=60', '.assets/examples/target-240p-60fps.mp4' ])
+
+
+@pytest.fixture(scope = 'function', autouse = True)
+def before_each() -> None:
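+	# every test starts from untrimmed jpg extraction at quality 90 unless it overrides these globals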
+	facefusion.globals.trim_frame_start = None
+	facefusion.globals.trim_frame_end = None
+	facefusion.globals.temp_frame_quality = 90
+	facefusion.globals.temp_frame_format = 'jpg'
+
+
+def test_detect_fps() -> None:
+	assert detect_fps('.assets/examples/target-240p-25fps.mp4') == 25.0
+	assert detect_fps('.assets/examples/target-240p-30fps.mp4') == 30.0
+	assert detect_fps('.assets/examples/target-240p-60fps.mp4') == 60.0
+
+
+def test_extract_frames() -> None:
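+	# extraction is requested at 30.0 fps, so every source should yield the same frame count regardless of its native rate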
+	target_paths =\
+	[
+		'.assets/examples/target-240p-25fps.mp4',
+		'.assets/examples/target-240p-30fps.mp4',
+		'.assets/examples/target-240p-60fps.mp4'
+	]
+	for target_path in target_paths:
+		temp_directory_path = get_temp_directory_path(target_path)
+		create_temp(target_path)
+
+		assert extract_frames(target_path, 30.0) is True
+		assert len(glob.glob1(temp_directory_path, '*.jpg')) == 324
+
+		clear_temp(target_path)
+
+
+def test_extract_frames_with_trim_start() -> None:
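+	# trim_frame_start counts source frames; the remainder is re-timed to 30.0 fps, which is why each rate produces a different total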
+	facefusion.globals.trim_frame_start = 224
+	data_provider =\
+	[
+		('.assets/examples/target-240p-25fps.mp4', 55),
+		('.assets/examples/target-240p-30fps.mp4', 100),
+		('.assets/examples/target-240p-60fps.mp4', 212)
+	]
+	for target_path, frame_total in data_provider:
+		temp_directory_path = get_temp_directory_path(target_path)
+		create_temp(target_path)
+
+		assert extract_frames(target_path, 30.0) is True
+		assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total
+
+		clear_temp(target_path)
+
+
+def test_extract_frames_with_trim_start_and_trim_end() -> None:
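+	# the two bounds leave 100 source frames; their duration depends on the source rate, hence the differing totals at 30.0 fps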
+	facefusion.globals.trim_frame_start = 124
+	facefusion.globals.trim_frame_end = 224
+	data_provider =\
+	[
+		('.assets/examples/target-240p-25fps.mp4', 120),
+		('.assets/examples/target-240p-30fps.mp4', 100),
+		('.assets/examples/target-240p-60fps.mp4', 50)
+	]
+	for target_path, frame_total in data_provider:
+		temp_directory_path = get_temp_directory_path(target_path)
+		create_temp(target_path)
+
+		assert extract_frames(target_path, 30.0) is True
+		assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total
+
+		clear_temp(target_path)
+
+
+def test_extract_frames_with_trim_end() -> None:
+	facefusion.globals.trim_frame_end = 100
+	data_provider =\
+	[
+		('.assets/examples/target-240p-25fps.mp4', 120),
+		('.assets/examples/target-240p-30fps.mp4', 100),
+		('.assets/examples/target-240p-60fps.mp4', 50)
+	]
+	for target_path, frame_total in data_provider:
+		temp_directory_path = get_temp_directory_path(target_path)
+		create_temp(target_path)
+
+		assert extract_frames(target_path, 30.0) is True
+		assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total
+
+		clear_temp(target_path)