Dataset columns: id (int64, 0 to ~190k) · prompt (string, lengths 21 to 13.4M) · docstring (string, lengths 1 to 12k)
802
from typing import Optional

import gradio

import facefusion.globals
import facefusion.choices
from facefusion import wording

EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None


def update_execution_thread_count(execution_thread_count : int = 1) -> None:
    facefusion.globals.execution_thread_count = execution_thread_count


def listen() -> None:
    EXECUTION_THREAD_COUNT_SLIDER.change(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER)
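For context, a hypothetical render() sketch of how such a slider component is typically constructed before listen() wires it up; the wording key and the 1-128 range below are assumptions for illustration, not taken from this row:

def render() -> None:
    # Hypothetical sketch: label key and range are assumed, not from the source row.
    global EXECUTION_THREAD_COUNT_SLIDER
    EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider(
        label = wording.get('uis.execution_thread_count_slider'),  # assumed wording key
        value = facefusion.globals.execution_thread_count,
        step = 1,
        minimum = 1,
        maximum = 128
    )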
803
from typing import Any, Dict, List, Optional
from time import sleep

import cv2
import gradio

import facefusion.globals
from facefusion import wording, logger
from facefusion.audio import get_audio_frame
from facefusion.common_helper import get_first
from facefusion.core import conditional_append_reference_faces
from facefusion.face_analyser import get_average_face, clear_face_analyser
from facefusion.face_store import clear_static_faces, get_reference_faces, clear_reference_faces
from facefusion.typing import Face, FaceSet, AudioFrame, VisionFrame
from facefusion.vision import get_video_frame, count_video_frame_total, normalize_frame_color, resize_frame_resolution, read_static_image, read_static_images
from facefusion.filesystem import is_image, is_video, filter_audio_paths
from facefusion.content_analyser import analyse_frame
from facefusion.processors.frame.core import load_frame_processor_module
from facefusion.uis.typing import ComponentName
from facefusion.uis.core import get_ui_component, register_ui_component

PREVIEW_IMAGE : Optional[gradio.Image] = None
PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None


def process_preview_frame(reference_faces : FaceSet, source_face : Face, source_audio_frame : AudioFrame, target_vision_frame : VisionFrame) -> VisionFrame:
    ...


# Signature-only context stubs included in the prompt (bodies elided in the source;
# Fps and Component are types from facefusion.typing and gradio respectively):
def get_audio_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Optional[AudioFrame]: ...
def get_first(__list__ : Any) -> Any: ...
def conditional_append_reference_faces() -> None: ...
def get_average_face(vision_frames : List[VisionFrame], position : int = 0) -> Optional[Face]: ...
def get_reference_faces() -> Optional[FaceSet]: ...
def get_video_frame(video_path : str, frame_number : int = 0) -> Optional[VisionFrame]: ...
def count_video_frame_total(video_path : str) -> int: ...
def normalize_frame_color(vision_frame : VisionFrame) -> VisionFrame: ...
def read_static_image(image_path : str) -> Optional[VisionFrame]: ...
def read_static_images(image_paths : List[str]) -> Optional[List[VisionFrame]]: ...
def is_image(image_path : str) -> bool: ...
def is_video(video_path : str) -> bool: ...
def filter_audio_paths(paths : List[str]) -> List[str]: ...
def register_ui_component(name : ComponentName, component : Component) -> None: ...


def render() -> None:
    global PREVIEW_IMAGE
    global PREVIEW_FRAME_SLIDER

    preview_image_args : Dict[str, Any] =\
    {
        'label': wording.get('uis.preview_image'),
        'interactive': False
    }
    preview_frame_slider_args : Dict[str, Any] =\
    {
        'label': wording.get('uis.preview_frame_slider'),
        'step': 1,
        'minimum': 0,
        'maximum': 100,
        'visible': False
    }
    conditional_append_reference_faces()
    reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
    source_frames = read_static_images(facefusion.globals.source_paths)
    source_face = get_average_face(source_frames)
    source_audio_path = get_first(filter_audio_paths(facefusion.globals.source_paths))
    if source_audio_path and facefusion.globals.output_video_fps:
        source_audio_frame = get_audio_frame(source_audio_path, facefusion.globals.output_video_fps, facefusion.globals.reference_frame_number)
    else:
        source_audio_frame = None
    if is_image(facefusion.globals.target_path):
        target_vision_frame = read_static_image(facefusion.globals.target_path)
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, target_vision_frame)
        preview_image_args['value'] = normalize_frame_color(preview_vision_frame)
    if is_video(facefusion.globals.target_path):
        temp_vision_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame)
        preview_image_args['value'] = normalize_frame_color(preview_vision_frame)
        preview_image_args['visible'] = True
        preview_frame_slider_args['value'] = facefusion.globals.reference_frame_number
        preview_frame_slider_args['maximum'] = count_video_frame_total(facefusion.globals.target_path)
        preview_frame_slider_args['visible'] = True
    PREVIEW_IMAGE = gradio.Image(**preview_image_args)
    PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args)
    register_ui_component('preview_frame_slider', PREVIEW_FRAME_SLIDER)
804
# imports identical to row 803 above (omitted to avoid repeating the preamble)
from typing import Literal  # additionally needed by the ComponentName context stub below

PREVIEW_IMAGE : Optional[gradio.Image] = None
PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None


def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image:
    clear_face_analyser()
    clear_reference_faces()
    clear_static_faces()
    sleep(0.5)
    return update_preview_image(frame_number)


def update_preview_image(frame_number : int = 0) -> gradio.Image:
    for frame_processor in facefusion.globals.frame_processors:
        frame_processor_module = load_frame_processor_module(frame_processor)
        while not frame_processor_module.post_check():
            logger.disable()
            sleep(0.5)
        logger.enable()
    conditional_append_reference_faces()
    reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
    source_frames = read_static_images(facefusion.globals.source_paths)
    source_face = get_average_face(source_frames)
    source_audio_path = get_first(filter_audio_paths(facefusion.globals.source_paths))
    if source_audio_path and facefusion.globals.output_video_fps:
        source_audio_frame = get_audio_frame(source_audio_path, facefusion.globals.output_video_fps, facefusion.globals.reference_frame_number)
    else:
        source_audio_frame = None
    if is_image(facefusion.globals.target_path):
        target_vision_frame = read_static_image(facefusion.globals.target_path)
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, target_vision_frame)
        preview_vision_frame = normalize_frame_color(preview_vision_frame)
        return gradio.Image(value = preview_vision_frame)
    if is_video(facefusion.globals.target_path):
        temp_vision_frame = get_video_frame(facefusion.globals.target_path, frame_number)
        preview_vision_frame = process_preview_frame(reference_faces, source_face, source_audio_frame, temp_vision_frame)
        preview_vision_frame = normalize_frame_color(preview_vision_frame)
        return gradio.Image(value = preview_vision_frame)
    return gradio.Image(value = None)


def update_preview_frame_slider() -> gradio.Slider:
    if is_video(facefusion.globals.target_path):
        video_frame_total = count_video_frame_total(facefusion.globals.target_path)
        return gradio.Slider(maximum = video_frame_total, visible = True)
    return gradio.Slider(value = None, maximum = None, visible = False)


# Context stubs from facefusion.uis.typing / facefusion.uis.core
# (UI_COMPONENTS and Component live in those modules):
ComponentName = Literal\
[
    'source_audio', 'source_image', 'target_image', 'target_video',
    'preview_frame_slider', 'face_selector_mode_dropdown', 'reference_face_position_gallery', 'reference_face_distance_slider',
    'face_analyser_order_dropdown', 'face_analyser_age_dropdown', 'face_analyser_gender_dropdown', 'face_detector_model_dropdown',
    'face_detector_size_dropdown', 'face_detector_score_slider', 'face_mask_types_checkbox_group', 'face_mask_blur_slider',
    'face_mask_padding_top_slider', 'face_mask_padding_bottom_slider', 'face_mask_padding_left_slider', 'face_mask_padding_right_slider',
    'face_mask_region_checkbox_group', 'frame_processors_checkbox_group', 'face_debugger_items_checkbox_group', 'face_enhancer_model_dropdown',
    'face_enhancer_blend_slider', 'face_swapper_model_dropdown', 'frame_enhancer_model_dropdown', 'frame_enhancer_blend_slider',
    'lip_syncer_model_dropdown', 'output_path_textbox', 'output_video_fps_slider', 'benchmark_runs_checkbox_group',
    'benchmark_cycles_slider', 'webcam_mode_radio', 'webcam_resolution_dropdown', 'webcam_fps_slider'
]


def get_ui_component(name : ComponentName) -> Optional[Component]:
    if name in UI_COMPONENTS:
        return UI_COMPONENTS[name]
    return None


def listen() -> None:
    PREVIEW_FRAME_SLIDER.release(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
    reference_face_position_gallery = get_ui_component('reference_face_position_gallery')
    if reference_face_position_gallery:
        reference_face_position_gallery.select(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
    multi_one_component_names : List[ComponentName] =\
    [
        'source_audio', 'source_image', 'target_image', 'target_video'
    ]
    for component_name in multi_one_component_names:
        component = get_ui_component(component_name)
        if component:
            for method in [ 'upload', 'change', 'clear' ]:
                getattr(component, method)(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
    multi_two_component_names : List[ComponentName] =\
    [
        'target_image', 'target_video'
    ]
    for component_name in multi_two_component_names:
        component = get_ui_component(component_name)
        if component:
            for method in [ 'upload', 'change', 'clear' ]:
                getattr(component, method)(update_preview_frame_slider, outputs = PREVIEW_FRAME_SLIDER)
    change_one_component_names : List[ComponentName] =\
    [
        'face_debugger_items_checkbox_group', 'face_enhancer_blend_slider', 'frame_enhancer_blend_slider', 'face_selector_mode_dropdown',
        'reference_face_distance_slider', 'face_mask_types_checkbox_group', 'face_mask_blur_slider', 'face_mask_padding_top_slider',
        'face_mask_padding_bottom_slider', 'face_mask_padding_left_slider', 'face_mask_padding_right_slider', 'face_mask_region_checkbox_group',
        'face_analyser_order_dropdown', 'face_analyser_age_dropdown', 'face_analyser_gender_dropdown', 'output_video_fps_slider'
    ]
    for component_name in change_one_component_names:
        component = get_ui_component(component_name)
        if component:
            component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
    change_two_component_names : List[ComponentName] =\
    [
        'frame_processors_checkbox_group', 'face_enhancer_model_dropdown', 'face_swapper_model_dropdown', 'frame_enhancer_model_dropdown',
        'lip_syncer_model_dropdown', 'face_detector_model_dropdown', 'face_detector_size_dropdown', 'face_detector_score_slider'
    ]
    for component_name in change_two_component_names:
        component = get_ui_component(component_name)
        if component:
            component.change(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
805
from typing import List, Any

import numpy


def create_int_range(start : int, stop : int, step : int) -> List[int]:
    return (numpy.arange(start, stop + step, step)).tolist()
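A quick sanity check of the helper above: the stop value is inclusive because the arange bound is extended by one step, and a step that does not divide the range evenly can overshoot stop.

print(create_int_range(0, 100, 25))   # [0, 25, 50, 75, 100] -- stop is inclusive
print(create_int_range(0, 10, 4))     # [0, 4, 8, 12] -- overshoots stop when step does not divide evenly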
806
from typing import List, Any

import numpy


def create_float_range(start : float, stop : float, step : float) -> List[float]:
    return (numpy.around(numpy.arange(start, stop + step, step), decimals = 2)).tolist()
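Same pattern for floats; the rounding to two decimals guards against binary floating point drift in the arange output:

print(create_float_range(0.0, 0.9, 0.3))   # [0.0, 0.3, 0.6, 0.9] -- the unrounded arange ends at 0.8999999999999999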
807
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading

import cv2
import numpy
import onnxruntime

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_mouth_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, warp_face_by_bounding_box, paste_back, create_bounding_box_from_landmark
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, AudioFrame, QueuePayload
from facefusion.filesystem import is_file, has_audio, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.audio import read_static_audio, get_audio_frame
from facefusion.filesystem import is_image, is_video, filter_audio_paths
from facefusion.common_helper import get_first
from facefusion.vision import read_image, write_image, read_static_image
from facefusion.processors.frame.typings import LipSyncerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

OPTIONS : Optional[OptionsWithModel] = None


def set_options(key : Literal['model'], value : Any) -> None:
    # OPTIONS is populated lazily by get_options() (see row 810) before values are overridden here.
    global OPTIONS
    OPTIONS[key] = value
808
# imports identical to row 807 above (omitted to avoid repeating the preamble)

def register_args(program : ArgumentParser) -> None:
    program.add_argument('--lip-syncer-model', help = wording.get('help.lip_syncer_model'), default = config.get_str_value('frame_processors.lip_syncer_model', 'wav2lip_gan'), choices = frame_processors_choices.lip_syncer_models)
809
# imports identical to row 807 above (omitted)

def apply_args(program : ArgumentParser) -> None:
    args = program.parse_args()
    frame_processors_globals.lip_syncer_model = args.lip_syncer_model
810
# imports (and the OPTIONS global) identical to row 807 above (omitted).
# The helpers below are inlined from facefusion.filesystem and facefusion.download
# and additionally need these imports in isolation:
import os
import subprocess
from concurrent.futures import ThreadPoolExecutor
from tqdm import tqdm
from facefusion.download import get_download_size


def get_options(key : Literal['model']) -> Any:
    global OPTIONS

    if OPTIONS is None:
        # MODELS is the module-level ModelSet defined in the full frame processor.
        OPTIONS =\
        {
            'model': MODELS[frame_processors_globals.lip_syncer_model]
        }
    return OPTIONS.get(key)


def resolve_relative_path(path : str) -> str:
    return os.path.abspath(os.path.join(os.path.dirname(__file__), path))


def conditional_download(download_directory_path : str, urls : List[str]) -> None:
    with ThreadPoolExecutor() as executor:
        for url in urls:
            executor.submit(get_download_size, url)
    for url in urls:
        download_file_path = os.path.join(download_directory_path, os.path.basename(url))
        initial = os.path.getsize(download_file_path) if is_file(download_file_path) else 0
        total = get_download_size(url)
        if initial < total:
            with tqdm(total = total, initial = initial, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
                subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ])
                current = initial
                while current < total:
                    if is_file(download_file_path):
                        current = os.path.getsize(download_file_path)
                        progress.update(current - progress.n)


def pre_check() -> bool:
    if not facefusion.globals.skip_download:
        download_directory_path = resolve_relative_path('../.assets/models')
        model_url = get_options('model').get('url')
        conditional_download(download_directory_path, [ model_url ])
    return True
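A hedged usage sketch of the download helpers above; the URL is illustrative only, not taken from the source. The download is delegated to a curl subprocess that resumes partial files via --continue-at, while the tqdm loop polls the file size for progress:

if not facefusion.globals.skip_download:
    conditional_download(resolve_relative_path('../.assets/models'), [ 'https://example.com/models/wav2lip_gan.onnx' ])  # hypothetical URL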
811
# imports identical to row 807 above (omitted)

NAME = __name__.upper()


# Signature-only context stubs (bodies elided in the source):
def get_options(key : Literal['model']) -> Any: ...
def is_file(file_path : str) -> bool: ...
def is_download_done(url : str, file_path : str) -> bool: ...


def post_check() -> bool:
    model_url = get_options('model').get('url')
    model_path = get_options('model').get('path')
    if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
        logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
        return False
    elif not is_file(model_path):
        logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
        return False
    return True
812
# imports identical to row 807 above (omitted); the inlined filesystem helpers below
# also rely on the filetype package and is_audio from facefusion.filesystem.

NAME = __name__.upper()

ProcessMode = Literal['output', 'preview', 'stream']


def has_audio(audio_paths : List[str]) -> bool:
    if audio_paths:
        return any(is_audio(audio_path) for audio_path in audio_paths)
    return False


def is_image(image_path : str) -> bool:
    return is_file(image_path) and filetype.helpers.is_image(image_path)


def is_video(video_path : str) -> bool:
    return is_file(video_path) and filetype.helpers.is_video(video_path)


def pre_process(mode : ProcessMode) -> bool:
    if not has_audio(facefusion.globals.source_paths):
        logger.error(wording.get('select_audio_source') + wording.get('exclamation_mark'), NAME)
        return False
    if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
        logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
        return False
    if mode == 'output' and not facefusion.globals.output_path:
        logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
        return False
    return True
813
# imports identical to row 807 above (omitted)

# Signature-only context stubs (bodies elided; read_static_audio and read_static_image
# are lru_cache-decorated in their source modules, which is what makes the
# cache_clear() calls below valid):
def clear_frame_processor() -> None: ...
def clear_face_analyser() -> Any: ...
def clear_face_occluder() -> None: ...
def clear_face_parser() -> None: ...
def clear_content_analyser() -> None: ...
def read_static_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]: ...
def read_static_image(image_path : str) -> Optional[VisionFrame]: ...


def post_process() -> None:
    read_static_image.cache_clear()
    read_static_audio.cache_clear()
    if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate':
        clear_frame_processor()
    if facefusion.globals.video_memory_strategy == 'strict':
        clear_face_analyser()
        clear_content_analyser()
        clear_face_occluder()
        clear_face_parser()
814
# imports identical to row 807 above (omitted)
from collections import namedtuple  # needed by the Face context stub below

# Context stubs from facefusion.typing:
Face = namedtuple('Face', [ 'bounding_box', 'landmark', 'score', 'embedding', 'normed_embedding', 'gender', 'age' ])
VisionFrame = numpy.ndarray[Any, Any]


def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
    # Intentional no-op in this frame processor.
    pass
815
# imports identical to row 807 above (omitted)

# Signature-only context stubs (bodies elided in the source):
def process_frame(inputs : LipSyncerInputs) -> VisionFrame: ...
def get_reference_faces() -> Optional[FaceSet]: ...
def get_audio_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Optional[AudioFrame]: ...
def filter_audio_paths(paths : List[str]) -> List[str]: ...
def get_first(__list__ : Any) -> Any: ...
def read_static_image(image_path : str) -> Optional[VisionFrame]: ...
def write_image(image_path : str, frame : VisionFrame) -> bool: ...


def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
    reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
    source_audio_path = get_first(filter_audio_paths(source_paths))
    source_audio_frame = get_audio_frame(source_audio_path, 25)  # a single audio frame at a nominal 25 fps
    target_vision_frame = read_static_image(target_path)
    result_frame = process_frame(
    {
        'reference_faces': reference_faces,
        'source_audio_frame': source_audio_frame,
        'target_vision_frame': target_vision_frame
    })
    write_image(output_path, result_frame)
816
# imports identical to row 807 above (omitted)

def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : Update_Process) -> None:
    reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
    source_audio_path = get_first(filter_audio_paths(source_paths))
    target_video_fps = facefusion.globals.output_video_fps

    for queue_payload in queue_payloads:
        frame_number = queue_payload['frame_number']
        target_vision_path = queue_payload['frame_path']
        source_audio_frame = get_audio_frame(source_audio_path, target_video_fps, frame_number)
        target_vision_frame = read_image(target_vision_path)
        result_frame = process_frame(
        {
            'reference_faces': reference_faces,
            'source_audio_frame': source_audio_frame,
            'target_vision_frame': target_vision_frame
        })
        write_image(target_vision_path, result_frame)
        update_progress()


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
    frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
817
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import cv2
import threading
import numpy
import onnxruntime

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.face_analyser import get_many_faces, clear_face_analyser, find_similar_faces, get_one_face
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, clear_face_occluder
from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.content_analyser import clear_content_analyser
from facefusion.face_store import get_reference_faces
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.common_helper import create_metavar
from facefusion.filesystem import is_file, is_image, is_video, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FaceEnhancerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

OPTIONS : Optional[OptionsWithModel] = None


def set_options(key : Literal['model'], value : Any) -> None:
    # OPTIONS is populated lazily by get_options() (see row 820) before values are overridden here.
    global OPTIONS
    OPTIONS[key] = value
818
# imports identical to row 817 above (omitted)

def create_metavar(ranges : List[Any]) -> str:
    return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'


def register_args(program : ArgumentParser) -> None:
    program.add_argument('--face-enhancer-model', help = wording.get('help.face_enhancer_model'), default = config.get_str_value('frame_processors.face_enhancer_model', 'gfpgan_1.4'), choices = frame_processors_choices.face_enhancer_models)
    program.add_argument('--face-enhancer-blend', help = wording.get('help.face_enhancer_blend'), type = int, default = config.get_int_value('frame_processors.face_enhancer_blend', '80'), choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))
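create_metavar compresses a long choices list into a compact range for argparse --help output. A quick check, assuming the blend range is built with create_int_range(0, 100, 1) from row 805 (an assumption about the choices module, not shown in this row):

blend_range = create_int_range(0, 100, 1)   # assumed construction of face_enhancer_blend_range
print(create_metavar(blend_range))          # [0-100]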
819
# imports identical to row 817 above (omitted)

def apply_args(program : ArgumentParser) -> None:
    args = program.parse_args()
    frame_processors_globals.face_enhancer_model = args.face_enhancer_model
    frame_processors_globals.face_enhancer_blend = args.face_enhancer_blend
820
# imports (and the OPTIONS global) identical to row 817 above (omitted); see row 810
# for the extra imports (os, subprocess, ThreadPoolExecutor, tqdm, get_download_size)
# that the inlined helpers below need in isolation.


def get_options(key : Literal['model']) -> Any:
    global OPTIONS

    if OPTIONS is None:
        # MODELS is the module-level ModelSet defined in the full frame processor.
        OPTIONS =\
        {
            'model': MODELS[frame_processors_globals.face_enhancer_model]
        }
    return OPTIONS.get(key)


def resolve_relative_path(path : str) -> str:
    return os.path.abspath(os.path.join(os.path.dirname(__file__), path))


def conditional_download(download_directory_path : str, urls : List[str]) -> None:
    with ThreadPoolExecutor() as executor:
        for url in urls:
            executor.submit(get_download_size, url)
    for url in urls:
        download_file_path = os.path.join(download_directory_path, os.path.basename(url))
        initial = os.path.getsize(download_file_path) if is_file(download_file_path) else 0
        total = get_download_size(url)
        if initial < total:
            with tqdm(total = total, initial = initial, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
                subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ])
                current = initial
                while current < total:
                    if is_file(download_file_path):
                        current = os.path.getsize(download_file_path)
                        progress.update(current - progress.n)


def pre_check() -> bool:
    if not facefusion.globals.skip_download:
        download_directory_path = resolve_relative_path('../.assets/models')
        model_url = get_options('model').get('url')
        conditional_download(download_directory_path, [ model_url ])
    return True
821
# imports (and the OPTIONS global) identical to row 817 above (omitted); is_file and
# is_download_done below are inlined from facefusion.filesystem / facefusion.download
# and also need os and get_download_size in isolation.

NAME = __name__.upper()


def get_options(key : Literal['model']) -> Any:
    global OPTIONS

    if OPTIONS is None:
        OPTIONS =\
        {
            'model': MODELS[frame_processors_globals.face_enhancer_model]
        }
    return OPTIONS.get(key)


def is_file(file_path : str) -> bool:
    return bool(file_path and os.path.isfile(file_path))


def is_download_done(url : str, file_path : str) -> bool:
    if is_file(file_path):
        return get_download_size(url) == os.path.getsize(file_path)
    return False


def post_check() -> bool:
    model_url = get_options('model').get('url')
    model_path = get_options('model').get('path')
    if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
        logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
        return False
    elif not is_file(model_path):
        logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
        return False
    return True
822
# imports identical to row 817 above (omitted); the inlined filesystem helpers below
# also rely on the filetype package.

NAME = __name__.upper()

ProcessMode = Literal['output', 'preview', 'stream']


def is_image(image_path : str) -> bool:
    return is_file(image_path) and filetype.helpers.is_image(image_path)


def is_video(video_path : str) -> bool:
    return is_file(video_path) and filetype.helpers.is_video(video_path)


def pre_process(mode : ProcessMode) -> bool:
    if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
        logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
        return False
    if mode == 'output' and not facefusion.globals.output_path:
        logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
        return False
    return True
823
# imports identical to row 817 above (omitted)
from functools import lru_cache  # read_static_image is cached in facefusion.vision,
                                 # which is what post_process relies on for cache_clear()


def clear_frame_processor() -> None:
    global FRAME_PROCESSOR
    FRAME_PROCESSOR = None


def clear_face_analyser() -> Any:
    global FACE_ANALYSER
    FACE_ANALYSER = None


def clear_face_occluder() -> None:
    global FACE_OCCLUDER
    FACE_OCCLUDER = None


def clear_content_analyser() -> None:
    global CONTENT_ANALYSER
    CONTENT_ANALYSER = None


@lru_cache(maxsize = None)
def read_static_image(image_path : str) -> Optional[VisionFrame]:
    return read_image(image_path)


def post_process() -> None:
    read_static_image.cache_clear()
    if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate':
        clear_frame_processor()
    if facefusion.globals.video_memory_strategy == 'strict':
        clear_face_analyser()
        clear_content_analyser()
        clear_face_occluder()
824
# imports identical to row 817 above (omitted)
from collections import namedtuple  # needed by the Face context stub below


def enhance_face(target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
    model_template = get_options('model').get('template')
    model_size = get_options('model').get('size')
    crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmark['5/68'], model_template, model_size)
    box_mask = create_static_box_mask(crop_vision_frame.shape[:2][::-1], facefusion.globals.face_mask_blur, (0, 0, 0, 0))
    crop_mask_list =\
    [
        box_mask
    ]
    if 'occlusion' in facefusion.globals.face_mask_types:
        occlusion_mask = create_occlusion_mask(crop_vision_frame)
        crop_mask_list.append(occlusion_mask)
    crop_vision_frame = prepare_crop_frame(crop_vision_frame)
    crop_vision_frame = apply_enhance(crop_vision_frame)
    crop_vision_frame = normalize_crop_frame(crop_vision_frame)
    crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
    paste_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, crop_mask, affine_matrix)
    temp_vision_frame = blend_frame(temp_vision_frame, paste_vision_frame)
    return temp_vision_frame


# Context stubs from facefusion.typing:
Face = namedtuple('Face', [ 'bounding_box', 'landmark', 'score', 'embedding', 'normed_embedding', 'gender', 'age' ])
VisionFrame = numpy.ndarray[Any, Any]


def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
    return enhance_face(target_face, temp_vision_frame)
825
# imports identical to row 817 above (omitted); FaceSet and FACE_STORE below come from
# facefusion.typing and facefusion.face_store respectively.


def process_frame(inputs : FaceEnhancerInputs) -> VisionFrame:
    reference_faces = inputs['reference_faces']
    target_vision_frame = inputs['target_vision_frame']

    if 'reference' in facefusion.globals.face_selector_mode:
        similar_faces = find_similar_faces(reference_faces, target_vision_frame, facefusion.globals.reference_face_distance)
        if similar_faces:
            for similar_face in similar_faces:
                target_vision_frame = enhance_face(similar_face, target_vision_frame)
    if 'one' in facefusion.globals.face_selector_mode:
        target_face = get_one_face(target_vision_frame)
        if target_face:
            target_vision_frame = enhance_face(target_face, target_vision_frame)
    if 'many' in facefusion.globals.face_selector_mode:
        many_faces = get_many_faces(target_vision_frame)
        if many_faces:
            for target_face in many_faces:
                target_vision_frame = enhance_face(target_face, target_vision_frame)
    return target_vision_frame


def get_reference_faces() -> Optional[FaceSet]:
    if FACE_STORE['reference_faces']:
        return FACE_STORE['reference_faces']
    return None


def read_static_image(image_path : str) -> Optional[VisionFrame]:
    return read_image(image_path)


def write_image(image_path : str, frame : VisionFrame) -> bool:
    if image_path:
        return cv2.imwrite(image_path, frame)
    return False


def process_image(source_path : str, target_path : str, output_path : str) -> None:
    reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
    target_vision_frame = read_static_image(target_path)
    result_frame = process_frame(
    {
        'reference_faces': reference_faces,
        'target_vision_frame': target_vision_frame
    })
    write_image(output_path, result_frame)
826
# imports identical to row 817 above (omitted)

def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : Update_Process) -> None:
    reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None

    for queue_payload in queue_payloads:
        target_vision_path = queue_payload['frame_path']
        target_vision_frame = read_image(target_vision_path)
        result_frame = process_frame(
        {
            'reference_faces': reference_faces,
            'target_vision_frame': target_vision_frame
        })
        write_image(target_vision_path, result_frame)
        update_progress()


def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
    frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
null
827
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import numpy
import onnx
import onnxruntime
from onnx import numpy_helper

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, Embedding, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.filesystem import is_file, is_image, has_image, is_video, filter_image_paths, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, read_static_images, write_image
from facefusion.processors.frame.typings import FaceSwapperInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

OPTIONS : Optional[OptionsWithModel] = None

def set_options(key : Literal['model'], value : Any) -> None:
	global OPTIONS
	OPTIONS[key] = value
null
828
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import numpy
import onnx
import onnxruntime
from onnx import numpy_helper

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, Embedding, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.filesystem import is_file, is_image, has_image, is_video, filter_image_paths, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, read_static_images, write_image
from facefusion.processors.frame.typings import FaceSwapperInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

def register_args(program : ArgumentParser) -> None:
	if onnxruntime.__version__ == '1.17.0':
		face_swapper_model_fallback = 'inswapper_128'
	else:
		face_swapper_model_fallback = 'inswapper_128_fp16'
	program.add_argument('--face-swapper-model', help = wording.get('help.face_swapper_model'), default = config.get_str_value('frame_processors.face_swapper_model', face_swapper_model_fallback), choices = frame_processors_choices.face_swapper_models)
null
829
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import numpy
import onnx
import onnxruntime
from onnx import numpy_helper

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, Embedding, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.filesystem import is_file, is_image, has_image, is_video, filter_image_paths, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, read_static_images, write_image
from facefusion.processors.frame.typings import FaceSwapperInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

def apply_args(program : ArgumentParser) -> None:
	args = program.parse_args()
	frame_processors_globals.face_swapper_model = args.face_swapper_model
	if args.face_swapper_model == 'blendswap_256':
		facefusion.globals.face_recognizer_model = 'arcface_blendswap'
	if args.face_swapper_model == 'inswapper_128' or args.face_swapper_model == 'inswapper_128_fp16':
		facefusion.globals.face_recognizer_model = 'arcface_inswapper'
	if args.face_swapper_model == 'simswap_256' or args.face_swapper_model == 'simswap_512_unofficial':
		facefusion.globals.face_recognizer_model = 'arcface_simswap'
	if args.face_swapper_model == 'uniface_256':
		facefusion.globals.face_recognizer_model = 'arcface_uniface'
null
830
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import numpy
import onnx
import onnxruntime
from onnx import numpy_helper

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, Embedding, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.filesystem import is_file, is_image, has_image, is_video, filter_image_paths, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, read_static_images, write_image
from facefusion.processors.frame.typings import FaceSwapperInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

def get_options(key : Literal['model']) -> Any:
	global OPTIONS
	if OPTIONS is None:
		OPTIONS =\
		{
			'model': MODELS[frame_processors_globals.face_swapper_model]
		}
	return OPTIONS.get(key)

def resolve_relative_path(path : str) -> str:
	return os.path.abspath(os.path.join(os.path.dirname(__file__), path))

def conditional_download(download_directory_path : str, urls : List[str]) -> None:
	with ThreadPoolExecutor() as executor:
		for url in urls:
			executor.submit(get_download_size, url)
	for url in urls:
		download_file_path = os.path.join(download_directory_path, os.path.basename(url))
		initial = os.path.getsize(download_file_path) if is_file(download_file_path) else 0
		total = get_download_size(url)
		if initial < total:
			with tqdm(total = total, initial = initial, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
				subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ])
				current = initial
				while current < total:
					if is_file(download_file_path):
						current = os.path.getsize(download_file_path)
						progress.update(current - progress.n)

def pre_check() -> bool:
	if not facefusion.globals.skip_download:
		download_directory_path = resolve_relative_path('../.assets/models')
		model_url = get_options('model').get('url')
		conditional_download(download_directory_path, [ model_url ])
	return True
null
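The download helper above drives its progress bar by polling the growing file on disk rather than reading the transfer stream; a simplified, self-contained sketch of that idea (the URL, path, and size below are hypothetical, and the loop assumes the transfer eventually completes):

# Sketch only: spawn the transfer, then poll the output file's size for progress.
import os
import subprocess
import tempfile
from tqdm import tqdm

url = 'https://example.com/model.onnx'  # hypothetical URL
download_file_path = os.path.join(tempfile.gettempdir(), 'model.onnx')
total = 1024 * 1024  # assume the total size is known, e.g. from a HEAD request

subprocess.Popen([ 'curl', '--silent', '--location', '--output', download_file_path, url ])
with tqdm(total = total, unit = 'B', unit_scale = True) as progress:
	current = 0
	while current < total:
		if os.path.isfile(download_file_path):
			current = os.path.getsize(download_file_path)
			progress.update(current - progress.n)  # progress.n is tqdm's running count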
831
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import numpy
import onnx
import onnxruntime
from onnx import numpy_helper

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, Embedding, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.filesystem import is_file, is_image, has_image, is_video, filter_image_paths, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, read_static_images, write_image
from facefusion.processors.frame.typings import FaceSwapperInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

NAME = __name__.upper()

def get_options(key : Literal['model']) -> Any:

def is_file(file_path : str) -> bool:

def is_download_done(url : str, file_path : str) -> bool:

def post_check() -> bool:
	model_url = get_options('model').get('url')
	model_path = get_options('model').get('path')
	if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
		logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
		return False
	elif not is_file(model_path):
		logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
		return False
	return True
null
832
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import numpy
import onnx
import onnxruntime
from onnx import numpy_helper

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, Embedding, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.filesystem import is_file, is_image, has_image, is_video, filter_image_paths, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, read_static_images, write_image
from facefusion.processors.frame.typings import FaceSwapperInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

NAME = __name__.upper()

def get_one_face(vision_frame : VisionFrame, position : int = 0) -> Optional[Face]:
	many_faces = get_many_faces(vision_frame)
	if many_faces:
		try:
			return many_faces[position]
		except IndexError:
			return many_faces[-1]
	return None

ProcessMode = Literal['output', 'preview', 'stream']

def is_image(image_path : str) -> bool:
	return is_file(image_path) and filetype.helpers.is_image(image_path)

def has_image(image_paths : List[str]) -> bool:
	if image_paths:
		return any(is_image(image_path) for image_path in image_paths)
	return False

def is_video(video_path : str) -> bool:
	return is_file(video_path) and filetype.helpers.is_video(video_path)

def filter_image_paths(paths : List[str]) -> List[str]:
	if paths:
		return [ path for path in paths if is_image(path) ]
	return []

def read_static_images(image_paths : List[str]) -> Optional[List[VisionFrame]]:
	frames = []
	if image_paths:
		for image_path in image_paths:
			frames.append(read_static_image(image_path))
	return frames

def pre_process(mode : ProcessMode) -> bool:
	if not has_image(facefusion.globals.source_paths):
		logger.error(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
		return False
	source_image_paths = filter_image_paths(facefusion.globals.source_paths)
	source_frames = read_static_images(source_image_paths)
	for source_frame in source_frames:
		if not get_one_face(source_frame):
			logger.error(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
			return False
	if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
		logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
		return False
	if mode == 'output' and not facefusion.globals.output_path:
		logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
		return False
	return True
null
833
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import numpy
import onnx
import onnxruntime
from onnx import numpy_helper

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, Embedding, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.filesystem import is_file, is_image, has_image, is_video, filter_image_paths, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, read_static_images, write_image
from facefusion.processors.frame.typings import FaceSwapperInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

def clear_frame_processor() -> None:
	global FRAME_PROCESSOR
	FRAME_PROCESSOR = None

def clear_model_matrix() -> None:
	global MODEL_MATRIX
	MODEL_MATRIX = None

def clear_face_analyser() -> Any:
	global FACE_ANALYSER
	FACE_ANALYSER = None

def clear_face_occluder() -> None:
	global FACE_OCCLUDER
	FACE_OCCLUDER = None

def clear_face_parser() -> None:
	global FACE_PARSER
	FACE_PARSER = None

def clear_content_analyser() -> None:
	global CONTENT_ANALYSER
	CONTENT_ANALYSER = None

def read_static_image(image_path : str) -> Optional[VisionFrame]:
	return read_image(image_path)

def post_process() -> None:
	read_static_image.cache_clear()
	if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate':
		clear_frame_processor()
		clear_model_matrix()
	if facefusion.globals.video_memory_strategy == 'strict':
		clear_face_analyser()
		clear_content_analyser()
		clear_face_occluder()
		clear_face_parser()
null
834
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import numpy
import onnx
import onnxruntime
from onnx import numpy_helper

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, Embedding, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.filesystem import is_file, is_image, has_image, is_video, filter_image_paths, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, read_static_images, write_image
from facefusion.processors.frame.typings import FaceSwapperInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

def swap_face(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
	model_template = get_options('model').get('template')
	model_size = get_options('model').get('size')
	crop_vision_frame, affine_matrix = warp_face_by_face_landmark_5(temp_vision_frame, target_face.landmark['5/68'], model_template, model_size)
	crop_mask_list = []
	if 'box' in facefusion.globals.face_mask_types:
		box_mask = create_static_box_mask(crop_vision_frame.shape[:2][::-1], facefusion.globals.face_mask_blur, facefusion.globals.face_mask_padding)
		crop_mask_list.append(box_mask)
	if 'occlusion' in facefusion.globals.face_mask_types:
		occlusion_mask = create_occlusion_mask(crop_vision_frame)
		crop_mask_list.append(occlusion_mask)
	crop_vision_frame = prepare_crop_frame(crop_vision_frame)
	crop_vision_frame = apply_swap(source_face, crop_vision_frame)
	crop_vision_frame = normalize_crop_frame(crop_vision_frame)
	if 'region' in facefusion.globals.face_mask_types:
		region_mask = create_region_mask(crop_vision_frame, facefusion.globals.face_mask_regions)
		crop_mask_list.append(region_mask)
	crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
	temp_vision_frame = paste_back(temp_vision_frame, crop_vision_frame, crop_mask, affine_matrix)
	return temp_vision_frame

Face = namedtuple('Face', [ 'bounding_box', 'landmark', 'score', 'embedding', 'normed_embedding', 'gender', 'age' ])

VisionFrame = numpy.ndarray[Any, Any]

def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
	return swap_face(source_face, target_face, temp_vision_frame)
null
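The mask combination at the end of swap_face reduces to an element-wise minimum over all collected masks followed by clipping, so the most restrictive mask wins per pixel; a runnable sketch with toy 2x2 masks:

import numpy

box_mask = numpy.array([ [ 1.0, 0.8 ], [ 0.9, 1.2 ] ])
occlusion_mask = numpy.array([ [ 0.7, 1.0 ], [ 1.0, 0.5 ] ])
region_mask = numpy.array([ [ 1.0, 1.0 ], [ 0.6, 1.0 ] ])

# Element-wise minimum across the stack, then clamp into [0, 1].
crop_mask = numpy.minimum.reduce([ box_mask, occlusion_mask, region_mask ]).clip(0, 1)
print(crop_mask)  # [[0.7 0.8] [0.6 0.5]]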
835
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import numpy
import onnx
import onnxruntime
from onnx import numpy_helper

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, Embedding, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.filesystem import is_file, is_image, has_image, is_video, filter_image_paths, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, read_static_images, write_image
from facefusion.processors.frame.typings import FaceSwapperInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

def process_frame(inputs : FaceSwapperInputs) -> VisionFrame:
	reference_faces = inputs['reference_faces']
	source_face = inputs['source_face']
	target_vision_frame = inputs['target_vision_frame']
	if 'reference' in facefusion.globals.face_selector_mode:
		similar_faces = find_similar_faces(reference_faces, target_vision_frame, facefusion.globals.reference_face_distance)
		if similar_faces:
			for similar_face in similar_faces:
				target_vision_frame = swap_face(source_face, similar_face, target_vision_frame)
	if 'one' in facefusion.globals.face_selector_mode:
		target_face = get_one_face(target_vision_frame)
		if target_face:
			target_vision_frame = swap_face(source_face, target_face, target_vision_frame)
	if 'many' in facefusion.globals.face_selector_mode:
		many_faces = get_many_faces(target_vision_frame)
		if many_faces:
			for target_face in many_faces:
				target_vision_frame = swap_face(source_face, target_face, target_vision_frame)
	return target_vision_frame

def get_average_face(vision_frames : List[VisionFrame], position : int = 0) -> Optional[Face]:
	average_face = None
	faces = []
	embedding_list = []
	normed_embedding_list = []
	for vision_frame in vision_frames:
		face = get_one_face(vision_frame, position)
		if face:
			faces.append(face)
			embedding_list.append(face.embedding)
			normed_embedding_list.append(face.normed_embedding)
	if faces:
		first_face = get_first(faces)
		average_face = Face(
			bounding_box = first_face.bounding_box,
			landmark = first_face.landmark,
			score = first_face.score,
			embedding = numpy.mean(embedding_list, axis = 0),
			normed_embedding = numpy.mean(normed_embedding_list, axis = 0),
			gender = first_face.gender,
			age = first_face.age
		)
	return average_face

def get_reference_faces() -> Optional[FaceSet]:
	if FACE_STORE['reference_faces']:
		return FACE_STORE['reference_faces']
	return None

def read_static_image(image_path : str) -> Optional[VisionFrame]:
	return read_image(image_path)

def read_static_images(image_paths : List[str]) -> Optional[List[VisionFrame]]:
	frames = []
	if image_paths:
		for image_path in image_paths:
			frames.append(read_static_image(image_path))
	return frames

def write_image(image_path : str, frame : VisionFrame) -> bool:
	if image_path:
		return cv2.imwrite(image_path, frame)
	return False

def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
	source_frames = read_static_images(source_paths)
	source_face = get_average_face(source_frames)
	target_vision_frame = read_static_image(target_path)
	result_frame = process_frame(
	{
		'reference_faces': reference_faces,
		'source_face': source_face,
		'target_vision_frame': target_vision_frame
	})
	write_image(output_path, result_frame)
null
836
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import numpy
import onnx
import onnxruntime
from onnx import numpy_helper

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.execution_helper import apply_execution_provider_options
from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, paste_back
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, Embedding, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.filesystem import is_file, is_image, has_image, is_video, filter_image_paths, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, read_static_images, write_image
from facefusion.processors.frame.typings import FaceSwapperInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : Update_Process) -> None:
	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
	source_frames = read_static_images(source_paths)
	source_face = get_average_face(source_frames)
	for queue_payload in queue_payloads:
		target_vision_path = queue_payload['frame_path']
		target_vision_frame = read_image(target_vision_path)
		result_frame = process_frame(
		{
			'reference_faces': reference_faces,
			'source_face': source_face,
			'target_vision_frame': target_vision_frame
		})
		write_image(target_vision_path, result_frame)
		update_progress()

def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
	frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
null
837
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.face_analyser import clear_face_analyser
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.common_helper import create_metavar
from facefusion.execution_helper import map_torch_backend
from facefusion.filesystem import is_file, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FrameEnhancerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

OPTIONS : Optional[OptionsWithModel] = None

def set_options(key : Literal['model'], value : Any) -> None:
	global OPTIONS
	OPTIONS[key] = value
null
838
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.face_analyser import clear_face_analyser
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.common_helper import create_metavar
from facefusion.execution_helper import map_torch_backend
from facefusion.filesystem import is_file, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FrameEnhancerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

def create_metavar(ranges : List[Any]) -> str:
	return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'

def register_args(program : ArgumentParser) -> None:
	program.add_argument('--frame-enhancer-model', help = wording.get('help.frame_enhancer_model'), default = config.get_str_value('frame_processors.frame_enhancer_model', 'real_esrgan_x2plus'), choices = frame_processors_choices.frame_enhancer_models)
	program.add_argument('--frame-enhancer-blend', help = wording.get('help.frame_enhancer_blend'), type = int, default = config.get_int_value('frame_processors.frame_enhancer_blend', '80'), choices = frame_processors_choices.frame_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.frame_enhancer_blend_range))
null
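create_metavar only needs the endpoints of a range to keep argparse help compact; a self-contained sketch, assuming the blend range is 0..100:

from typing import Any, List

def create_metavar(ranges : List[Any]) -> str:
	return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'

print(create_metavar(range(0, 101)))  # [0-100]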
839
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.face_analyser import clear_face_analyser
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.common_helper import create_metavar
from facefusion.execution_helper import map_torch_backend
from facefusion.filesystem import is_file, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FrameEnhancerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

def apply_args(program : ArgumentParser) -> None:
	args = program.parse_args()
	frame_processors_globals.frame_enhancer_model = args.frame_enhancer_model
	frame_processors_globals.frame_enhancer_blend = args.frame_enhancer_blend
null
840
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.face_analyser import clear_face_analyser
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.common_helper import create_metavar
from facefusion.execution_helper import map_torch_backend
from facefusion.filesystem import is_file, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FrameEnhancerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

def get_options(key : Literal['model']) -> Any:
	global OPTIONS
	if OPTIONS is None:
		OPTIONS =\
		{
			'model': MODELS[frame_processors_globals.frame_enhancer_model]
		}
	return OPTIONS.get(key)

def resolve_relative_path(path : str) -> str:
	return os.path.abspath(os.path.join(os.path.dirname(__file__), path))

def conditional_download(download_directory_path : str, urls : List[str]) -> None:
	with ThreadPoolExecutor() as executor:
		for url in urls:
			executor.submit(get_download_size, url)
	for url in urls:
		download_file_path = os.path.join(download_directory_path, os.path.basename(url))
		initial = os.path.getsize(download_file_path) if is_file(download_file_path) else 0
		total = get_download_size(url)
		if initial < total:
			with tqdm(total = total, initial = initial, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
				subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ])
				current = initial
				while current < total:
					if is_file(download_file_path):
						current = os.path.getsize(download_file_path)
						progress.update(current - progress.n)

def pre_check() -> bool:
	if not facefusion.globals.skip_download:
		download_directory_path = resolve_relative_path('../.assets/models')
		model_url = get_options('model').get('url')
		conditional_download(download_directory_path, [ model_url ])
	return True
null
841
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.face_analyser import clear_face_analyser
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.common_helper import create_metavar
from facefusion.execution_helper import map_torch_backend
from facefusion.filesystem import is_file, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FrameEnhancerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

NAME = __name__.upper()

def get_options(key : Literal['model']) -> Any:
	global OPTIONS
	if OPTIONS is None:
		OPTIONS =\
		{
			'model': MODELS[frame_processors_globals.frame_enhancer_model]
		}
	return OPTIONS.get(key)

def is_file(file_path : str) -> bool:
	return bool(file_path and os.path.isfile(file_path))

def is_download_done(url : str, file_path : str) -> bool:
	if is_file(file_path):
		return get_download_size(url) == os.path.getsize(file_path)
	return False

def post_check() -> bool:
	model_url = get_options('model').get('url')
	model_path = get_options('model').get('path')
	if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
		logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
		return False
	elif not is_file(model_path):
		logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
		return False
	return True
null
842
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.face_analyser import clear_face_analyser
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.common_helper import create_metavar
from facefusion.execution_helper import map_torch_backend
from facefusion.filesystem import is_file, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FrameEnhancerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

NAME = __name__.upper()

ProcessMode = Literal['output', 'preview', 'stream']

def pre_process(mode : ProcessMode) -> bool:
	if mode == 'output' and not facefusion.globals.output_path:
		logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
		return False
	return True
null
843
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.face_analyser import clear_face_analyser
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.common_helper import create_metavar
from facefusion.execution_helper import map_torch_backend
from facefusion.filesystem import is_file, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FrameEnhancerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

def clear_frame_processor() -> None:
	global FRAME_PROCESSOR
	FRAME_PROCESSOR = None

def clear_face_analyser() -> Any:
	global FACE_ANALYSER
	FACE_ANALYSER = None

def clear_content_analyser() -> None:
	global CONTENT_ANALYSER
	CONTENT_ANALYSER = None

def read_static_image(image_path : str) -> Optional[VisionFrame]:
	return read_image(image_path)

def post_process() -> None:
	read_static_image.cache_clear()
	if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate':
		clear_frame_processor()
	if facefusion.globals.video_memory_strategy == 'strict':
		clear_face_analyser()
		clear_content_analyser()
null
844
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.face_analyser import clear_face_analyser
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.common_helper import create_metavar
from facefusion.execution_helper import map_torch_backend
from facefusion.filesystem import is_file, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FrameEnhancerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

Face = namedtuple('Face', [ 'bounding_box', 'landmark', 'score', 'embedding', 'normed_embedding', 'gender', 'age' ])

VisionFrame = numpy.ndarray[Any, Any]

def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
	pass
null
845
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.face_analyser import clear_face_analyser
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.common_helper import create_metavar
from facefusion.execution_helper import map_torch_backend
from facefusion.filesystem import is_file, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FrameEnhancerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

def process_frame(inputs : FrameEnhancerInputs) -> VisionFrame:
	target_vision_frame = inputs['target_vision_frame']
	return enhance_frame(target_vision_frame)

def read_static_image(image_path : str) -> Optional[VisionFrame]:
	return read_image(image_path)

def write_image(image_path : str, frame : VisionFrame) -> bool:
	if image_path:
		return cv2.imwrite(image_path, frame)
	return False

def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
	target_vision_frame = read_static_image(target_path)
	result_frame = process_frame(
	{
		'target_vision_frame': target_vision_frame
	})
	write_image(output_path, result_frame)
null
846
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
import threading
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, logger, wording
from facefusion.face_analyser import clear_face_analyser
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, QueuePayload
from facefusion.common_helper import create_metavar
from facefusion.execution_helper import map_torch_backend
from facefusion.filesystem import is_file, resolve_relative_path
from facefusion.download import conditional_download, is_download_done
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FrameEnhancerInputs
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : Update_Process) -> None:

def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
	frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
null
847
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, wording
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, categorize_age, categorize_gender
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, QueuePayload
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FaceDebuggerInputs
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

def get_frame_processor() -> None:
	pass
null
848
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, wording
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, categorize_age, categorize_gender
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, QueuePayload
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FaceDebuggerInputs
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

def get_options(key : Literal['model']) -> None:
	pass
null
849
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, wording
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, categorize_age, categorize_gender
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, QueuePayload
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FaceDebuggerInputs
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

def set_options(key : Literal['model'], value : Any) -> None:
	pass
null
850
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, wording
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, categorize_age, categorize_gender
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, QueuePayload
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FaceDebuggerInputs
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

def register_args(program : ArgumentParser) -> None:
	program.add_argument('--face-debugger-items', help = wording.get('help.face_debugger_items').format(choices = ', '.join(frame_processors_choices.face_debugger_items)), default = config.get_str_list('frame_processors.face_debugger_items', 'landmark-5 face-mask'), choices = frame_processors_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS')
null
851
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, wording
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, categorize_age, categorize_gender
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, QueuePayload
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FaceDebuggerInputs
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

def apply_args(program : ArgumentParser) -> None:
	args = program.parse_args()
	frame_processors_globals.face_debugger_items = args.face_debugger_items
null
852
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, wording
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, categorize_age, categorize_gender
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, QueuePayload
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FaceDebuggerInputs
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

def pre_check() -> bool:
	return True
null
853
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, wording
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, categorize_age, categorize_gender
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, QueuePayload
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FaceDebuggerInputs
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

def post_check() -> bool:
	return True
null
854
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, wording
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, categorize_age, categorize_gender
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, QueuePayload
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FaceDebuggerInputs
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

ProcessMode = Literal['output', 'preview', 'stream']

def pre_process(mode : ProcessMode) -> bool:
	return True
null
855
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, wording
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, categorize_age, categorize_gender
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, QueuePayload
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FaceDebuggerInputs
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

def clear_frame_processor() -> None:
	pass

def clear_face_analyser() -> Any:
	global FACE_ANALYSER
	FACE_ANALYSER = None

def clear_face_occluder() -> None:
	global FACE_OCCLUDER
	FACE_OCCLUDER = None

def clear_face_parser() -> None:
	global FACE_PARSER
	FACE_PARSER = None

def clear_content_analyser() -> None:
	global CONTENT_ANALYSER
	CONTENT_ANALYSER = None

def read_static_image(image_path : str) -> Optional[VisionFrame]:
	return read_image(image_path)

def post_process() -> None:
	read_static_image.cache_clear()
	if facefusion.globals.video_memory_strategy == 'strict' or facefusion.globals.video_memory_strategy == 'moderate':
		clear_frame_processor()
	if facefusion.globals.video_memory_strategy == 'strict':
		clear_face_analyser()
		clear_content_analyser()
		clear_face_occluder()
		clear_face_parser()
null
856
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, wording
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, categorize_age, categorize_gender
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, QueuePayload
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FaceDebuggerInputs
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

Face = namedtuple('Face', [ 'bounding_box', 'landmark', 'score', 'embedding', 'normed_embedding', 'gender', 'age' ])

VisionFrame = numpy.ndarray[Any, Any]

def get_reference_frame(source_face : Face, target_face : Face, temp_vision_frame : VisionFrame) -> VisionFrame:
	pass
null
857
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import config, wording
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
from facefusion.face_helper import warp_face_by_face_landmark_5, categorize_age, categorize_gender
from facefusion.face_store import get_reference_faces
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, QueuePayload
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame.typings import FaceDebuggerInputs
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

def process_frame(inputs : FaceDebuggerInputs) -> VisionFrame:
	reference_faces = inputs['reference_faces']
	target_vision_frame = inputs['target_vision_frame']
	if 'reference' in facefusion.globals.face_selector_mode:
		similar_faces = find_similar_faces(reference_faces, target_vision_frame, facefusion.globals.reference_face_distance)
		if similar_faces:
			for similar_face in similar_faces:
				target_vision_frame = debug_face(similar_face, target_vision_frame)
	if 'one' in facefusion.globals.face_selector_mode:
		target_face = get_one_face(target_vision_frame)
		if target_face:
			target_vision_frame = debug_face(target_face, target_vision_frame)
	if 'many' in facefusion.globals.face_selector_mode:
		many_faces = get_many_faces(target_vision_frame)
		if many_faces:
			for target_face in many_faces:
				target_vision_frame = debug_face(target_face, target_vision_frame)
	return target_vision_frame

def get_reference_faces() -> Optional[FaceSet]:
	if FACE_STORE['reference_faces']:
		return FACE_STORE['reference_faces']
	return None

def read_static_image(image_path : str) -> Optional[VisionFrame]:
	return read_image(image_path)

def write_image(image_path : str, frame : VisionFrame) -> bool:
	if image_path:
		return cv2.imwrite(image_path, frame)
	return False

def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
	target_vision_frame = read_static_image(target_path)
	result_frame = process_frame(
	{
		'reference_faces': reference_faces,
		'target_vision_frame': target_vision_frame
	})
	write_image(output_path, result_frame)
null
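A minimal sketch of driving the face-debugger record above end to end; the file paths and selector mode are placeholders, and `facefusion.globals` is assumed to be configured as in the surrounding project:

# debug every detected face in a single image (paths are placeholders)
facefusion.globals.face_selector_mode = 'many'
process_image(source_paths = [], target_path = 'target.jpg', output_path = 'target_debug.jpg')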
858
from typing import Any, List, Literal from argparse import ArgumentParser import cv2 import numpy import facefusion.globals import facefusion.processors.frame.core as frame_processors from facefusion import config, wording from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser from facefusion.face_helper import warp_face_by_face_landmark_5, categorize_age, categorize_gender from facefusion.face_store import get_reference_faces from facefusion.content_analyser import clear_content_analyser from facefusion.typing import Face, VisionFrame, Update_Process, ProcessMode, QueuePayload from facefusion.vision import read_image, read_static_image, write_image from facefusion.processors.frame.typings import FaceDebuggerInputs from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : Update_Process) -> None: reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None for queue_payload in queue_payloads: target_vision_path = queue_payload['frame_path'] target_vision_frame = read_image(target_vision_path) result_frame = process_frame( { 'reference_faces': reference_faces, 'target_vision_frame': target_vision_frame }) write_image(target_vision_path, result_frame) update_progress() def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
null
859
from typing import Dict, Tuple import sys import os import platform import tempfile import subprocess import inquirer from argparse import ArgumentParser, HelpFormatter from facefusion import metadata, wording TORCH : Dict[str, str] =\ { 'default': 'default', 'cpu': 'cpu' } ONNXRUNTIMES : Dict[str, Tuple[str, str]] = {} def run(program : ArgumentParser) -> None: args = program.parse_args() python_id = 'cp' + str(sys.version_info.major) + str(sys.version_info.minor) if platform.system().lower() == 'darwin': os.environ['SYSTEM_VERSION_COMPAT'] = '0' if not args.skip_venv: os.environ['PIP_REQUIRE_VIRTUALENV'] = '1' if args.torch and args.onnxruntime: answers =\ { 'torch': args.torch, 'onnxruntime': args.onnxruntime } else: answers = inquirer.prompt( [ inquirer.List('torch', message = wording.get('help.install_dependency').format(dependency = 'torch'), choices = list(TORCH.keys())), inquirer.List('onnxruntime', message = wording.get('help.install_dependency').format(dependency = 'onnxruntime'), choices = list(ONNXRUNTIMES.keys())) ]) if answers: torch = answers['torch'] torch_wheel = TORCH[torch] onnxruntime = answers['onnxruntime'] onnxruntime_name, onnxruntime_version = ONNXRUNTIMES[onnxruntime] subprocess.call([ 'pip', 'uninstall', 'torch', '-y', '-q' ]) if torch_wheel == 'default': subprocess.call([ 'pip', 'install', '-r', 'requirements.txt', '--force-reinstall' ]) else: subprocess.call([ 'pip', 'install', '-r', 'requirements.txt', '--extra-index-url', 'https://download.pytorch.org/whl/' + torch_wheel, '--force-reinstall' ]) if onnxruntime == 'rocm-5.4.2' or onnxruntime == 'rocm-5.6': if python_id in [ 'cp39', 'cp310', 'cp311' ]: rocm_version = onnxruntime.replace('-', '') rocm_version = rocm_version.replace('.', '') wheel_name = 'onnxruntime_training-' + onnxruntime_version + '+' + rocm_version + '-' + python_id + '-' + python_id + '-manylinux_2_17_x86_64.manylinux2014_x86_64.whl' wheel_path = os.path.join(tempfile.gettempdir(), wheel_name) wheel_url = 'https://download.onnxruntime.ai/' + wheel_name subprocess.call([ 'curl', '--silent', '--location', '--continue-at', '-', '--output', wheel_path, wheel_url ]) subprocess.call([ 'pip', 'uninstall', wheel_path, '-y', '-q' ]) subprocess.call([ 'pip', 'install', wheel_path, '--force-reinstall' ]) os.remove(wheel_path) else: subprocess.call([ 'pip', 'uninstall', 'onnxruntime', onnxruntime_name, '-y', '-q' ]) if onnxruntime == 'cuda-12.1': subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version, '--extra-index-url', 'https://pkgs.dev.azure.com/onnxruntime/onnxruntime/_packaging/onnxruntime-cuda-12/pypi/simple', '--force-reinstall' ]) else: subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version, '--force-reinstall' ]) def cli() -> None: program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 130)) program.add_argument('--torch', help = wording.get('help.install_dependency').format(dependency = 'torch'), choices = TORCH.keys()) program.add_argument('--onnxruntime', help = wording.get('help.install_dependency').format(dependency = 'onnxruntime'), choices = ONNXRUNTIMES.keys()) program.add_argument('--skip-venv', help = wording.get('help.skip_venv'), action = 'store_true') program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version') run(program)
null
860
import logging
from typing import List

import click
from rich import print

from name_that_hash import check_hashes, hash_namer, hashes, prettifier

def print_help(ctx):
    click.echo(ctx.get_help())
    ctx.exit()
null
861
import logging
from typing import List

import click
from rich import print

from name_that_hash import check_hashes, hash_namer, hashes, prettifier

def banner():
    text = r"""[bold blue]
 _   _                           _____ _           _          _   _           _
| \ | |                         |_   _| |         | |        | | | |         | |
|  \| | __ _ _ __ ___   ___ ______| | | |__   __ _| |_ ______| |_| | __ _ ___| |__
| . ` |/ _` | '_ ` _ \ / _ \______| | | '_ \ / _` | __|______|  _  |/ _` / __| '_ \
| |\  | (_| | | | | | |  __/      | | | | | | (_| | |_       | | | | (_| \__ \ | | |
\_| \_/\__,_|_| |_| |_|\___|      \_/ |_| |_|\__,_|\__|      \_| |_/\__,_|___/_| |_|

    https://twitter.com/bee_sec_san
    https://github.com/HashPals/Name-That-Hash [/bold blue]
    """
    print(text)
null
862
import logging
from typing import List

import click
from rich import print

from name_that_hash import check_hashes, hash_namer, hashes, prettifier

def set_logging(kwargs):
    if kwargs["verbose"]:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s - %(message)s",
            datefmt="%d-%b-%y %H:%M:%S",
        )
    else:
        logging.basicConfig(level=logging.CRITICAL)
null
863
import logging
from typing import List

import click
from rich import print

from name_that_hash import check_hashes, hash_namer, hashes, prettifier

def compute_hashes_for_api(chash: List, args: dict = {}):
    # nth = the object which names the hash types
    nth = hash_namer.Name_That_Hash(hashes.prototypes)
    hashChecker = check_hashes.HashChecker(args, nth)
    for i in chash:
        hashChecker.single_hash(i)
    return hashChecker.output

The provided code snippet includes necessary dependencies for implementing the `api_return_hashes_as_json` function.

Write a Python function `def api_return_hashes_as_json(chash: List, args: dict = {"popular_only": False})` to solve the following problem:

Using name-that-hash as an API? Call this function! Given a list of hash strings, return a list of JSON objects for all hashes, in the same order as the input.

Here is the function:

def api_return_hashes_as_json(chash: List, args: dict = {"popular_only": False}):
    """
    Using name-that-hash as an API? Call this function!

    Given a list of hash strings, return a list of JSON objects for all hashes,
    in the same order as the input.
    """
    pretty_printer = prettifier.Prettifier(args, api=True)
    return pretty_printer.greppable_output(compute_hashes_for_api(chash, args))
Using name-that-hash as an API? Call this function! Given a list of hash strings, return a list of JSON objects for all hashes, in the same order as the input.
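A minimal usage sketch for the API entry point above, assuming the function is imported from `name_that_hash.runner` as in the upstream project; the hash below is the MD5 of "password", and the exact JSON fields depend on the prettifier:

from name_that_hash.runner import api_return_hashes_as_json

# MD5 of "password"; any number of hashes may be passed
result = api_return_hashes_as_json(["5f4dcc3b5aa765d61d8327deb882cf99"])
print(result)  # JSON describing the candidate hash types, in input order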
864
import logging
from typing import List

import click
from rich import print

from name_that_hash import check_hashes, hash_namer, hashes, prettifier

def compute_hashes_for_api(chash: List, args: dict = {}):
    # nth = the object which names the hash types
    nth = hash_namer.Name_That_Hash(hashes.prototypes)
    hashChecker = check_hashes.HashChecker(args, nth)
    for i in chash:
        hashChecker.single_hash(i)
    return hashChecker.output

The provided code snippet includes necessary dependencies for implementing the `api_return_hashes_as_dict` function.

Write a Python function `def api_return_hashes_as_dict(chash: List, args: dict = {"popular_only": False})` to solve the following problem:

Returns hashes as a Python dictionary

Here is the function:

def api_return_hashes_as_dict(chash: List, args: dict = {"popular_only": False}):
    """
    Returns hashes as a Python dictionary
    """
    pretty_printer = prettifier.Prettifier(args, api=True)
    return pretty_printer.turn_hash_objs_into_dict(compute_hashes_for_api(chash, args))
Returns hashes as a Python dictionary
865
from typing import Any

import nox
from nox.sessions import Session

locations = "name_that_hash/", "tests/", "docs/"

def black(session):
    args = session.posargs or locations
    session.install("black")
    session.run("black", *args)
null
866
from typing import Any import nox from nox.sessions import Session def install_with_constraints(session: Session, *args: str, **kwargs: Any) -> None: """Install packages constrained by Poetry's lock file. This function is a wrapper for nox.sessions.Session.install. It invokes pip to install packages inside of the session's virtualenv. Additionally, pip is passed a constraints file generated from Poetry's lock file, to ensure that the packages are pinned to the versions specified in poetry.lock. This allows you to manage the packages as Poetry development dependencies. Arguments: session: The Session object. args: Command-line arguments for pip. kwargs: Additional keyword arguments for Session.install. """ session.run( "poetry", "export", "--dev", "--format=requirements.txt", "--output=requirements.txt", external=True, ) session.install("--constraint=requirements.txt", *args, **kwargs) The provided code snippet includes necessary dependencies for implementing the `coverage` function. Write a Python function `def coverage(session: Session) -> None` to solve the following problem: Upload coverage data. Here is the function: def coverage(session: Session) -> None: """Upload coverage data.""" install_with_constraints(session, "coverage[toml]", "codecov") session.run("coverage", "xml", "--fail-under=0") session.run("codecov", *session.posargs)
Upload coverage data.
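To make the wrapper's role concrete, here is a hypothetical nox session built on `install_with_constraints`; the session name and the flake8 dependency are illustrative, not part of the original noxfile:

import nox
from nox.sessions import Session

@nox.session(python="3.9")
def lint(session: Session) -> None:
    """Lint with flake8, pinned to the versions in poetry.lock."""
    install_with_constraints(session, "flake8")  # hypothetical dev dependency
    session.run("flake8", "name_that_hash/", "tests/")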
867
from typing import Any import nox from nox.sessions import Session def install_with_constraints(session: Session, *args: str, **kwargs: Any) -> None: """Install packages constrained by Poetry's lock file. This function is a wrapper for nox.sessions.Session.install. It invokes pip to install packages inside of the session's virtualenv. Additionally, pip is passed a constraints file generated from Poetry's lock file, to ensure that the packages are pinned to the versions specified in poetry.lock. This allows you to manage the packages as Poetry development dependencies. Arguments: session: The Session object. args: Command-line arguments for pip. kwargs: Additional keyword arguments for Session.install. """ session.run( "poetry", "export", "--dev", "--format=requirements.txt", "--output=requirements.txt", external=True, ) session.install("--constraint=requirements.txt", *args, **kwargs) The provided code snippet includes necessary dependencies for implementing the `docs` function. Write a Python function `def docs(session: Session) -> None` to solve the following problem: Build the documentation. Here is the function: def docs(session: Session) -> None: """Build the documentation.""" install_with_constraints(session, "sphinx") session.run("sphinx-build", "docs", "docs/_build")
Build the documentation.
868
from typing import Any

import nox
from nox.sessions import Session

def tests(session):
    session.run("pip", "install", "click", external=True)
    session.run("poetry", "install", external=True)
    session.run("poetry", "run", "pytest")
null
869
import numpy as np
import torch

The provided code snippet includes necessary dependencies for implementing the `_format_observation` function.

Write a Python function `def _format_observation(obs, device)` to solve the following problem:

A utility function to process observations and move them to CUDA.

Here is the function:

def _format_observation(obs, device):
    """
    A utility function to process observations and move them to CUDA.
    """
    position = obs['position']
    device = torch.device('cuda:'+str(device))
    x_batch = torch.from_numpy(obs['x_batch']).to(device)
    z_batch = torch.from_numpy(obs['z_batch']).to(device)
    x_no_action = torch.from_numpy(obs['x_no_action'])
    z = torch.from_numpy(obs['z'])
    obs = {'x_batch': x_batch,
           'z_batch': z_batch,
           'legal_actions': obs['legal_actions'],
           }
    return position, obs, x_no_action, z
A utility function to process observations and move them to CUDA.
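A small usage sketch for `_format_observation`; the array shapes below are made up for illustration (real DouZero observations are position-dependent), and an available CUDA device is assumed:

import numpy as np

obs = {
    'position': 'landlord',
    'x_batch': np.zeros((4, 373), dtype=np.float32),    # illustrative shapes
    'z_batch': np.zeros((4, 5, 162), dtype=np.float32),
    'x_no_action': np.zeros(319, dtype=np.int8),
    'z': np.zeros((5, 162), dtype=np.int8),
    'legal_actions': [[3], [4], [5], []],
}

position, obs_gpu, x_no_action, z = _format_observation(obs, device=0)
print(position, obs_gpu['x_batch'].device)  # e.g. landlord cuda:0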
870
import copy
import datetime
import csv
import json
import logging
import os
import time
from typing import Dict

import git

def gather_metadata() -> Dict:
    date_start = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
    # gathering git metadata
    try:
        repo = git.Repo(search_parent_directories=True)
        git_sha = repo.commit().hexsha
        git_data = dict(
            commit=git_sha,
            branch=repo.active_branch.name,
            is_dirty=repo.is_dirty(),
            path=repo.git_dir,
        )
    except git.InvalidGitRepositoryError:
        git_data = None
    # gathering slurm metadata
    if 'SLURM_JOB_ID' in os.environ:
        slurm_env_keys = [k for k in os.environ if k.startswith('SLURM')]
        slurm_data = {}
        for k in slurm_env_keys:
            d_key = k.replace('SLURM_', '').replace('SLURMD_', '').lower()
            slurm_data[d_key] = os.environ[k]
    else:
        slurm_data = None
    return dict(
        date_start=date_start,
        date_end=None,
        successful=False,
        git=git_data,
        slurm=slurm_data,
        env=os.environ.copy(),
    )
null
871
import os import threading import time import timeit import pprint from collections import deque import torch from torch import multiprocessing as mp from torch import nn from .file_writer import FileWriter from .models import Model from .utils import get_batch, log, create_env, create_buffers, create_optimizers, act def learn(position, actor_models, model, batch, optimizer, flags, lock): """Performs a learning (optimization) step.""" device = torch.device('cuda:'+str(flags.training_device)) obs_x_no_action = batch['obs_x_no_action'].to(device) obs_action = batch['obs_action'].to(device) obs_x = torch.cat((obs_x_no_action, obs_action), dim=2).float() obs_x = torch.flatten(obs_x, 0, 1) obs_z = torch.flatten(batch['obs_z'].to(device), 0, 1).float() target = torch.flatten(batch['target'].to(device), 0, 1) episode_returns = batch['episode_return'][batch['done']] mean_episode_return_buf[position].append(torch.mean(episode_returns).to(device)) with lock: learner_outputs = model(obs_z, obs_x, return_value=True) loss = compute_loss(learner_outputs['values'], target) stats = { 'mean_episode_return_'+position: torch.mean(torch.stack([_r for _r in mean_episode_return_buf[position]])).item(), 'loss_'+position: loss.item(), } optimizer.zero_grad() loss.backward() nn.utils.clip_grad_norm_(model.parameters(), flags.max_grad_norm) optimizer.step() for actor_model in actor_models: actor_model.get_model(position).load_state_dict(model.state_dict()) return stats class FileWriter: def __init__(self, xpid: str = None, xp_args: dict = None, rootdir: str = '~/palaas'): if not xpid: # make unique id xpid = '{proc}_{unixtime}'.format( proc=os.getpid(), unixtime=int(time.time())) self.xpid = xpid self._tick = 0 # metadata gathering if xp_args is None: xp_args = {} self.metadata = gather_metadata() # we need to copy the args, otherwise when we close the file writer # (and rewrite the args) we might have non-serializable objects (or # other nasty stuff). self.metadata['args'] = copy.deepcopy(xp_args) self.metadata['xpid'] = self.xpid formatter = logging.Formatter('%(message)s') self._logger = logging.getLogger('palaas/out') # to stdout handler shandle = logging.StreamHandler() shandle.setFormatter(formatter) self._logger.addHandler(shandle) self._logger.setLevel(logging.INFO) rootdir = os.path.expandvars(os.path.expanduser(rootdir)) # to file handler self.basepath = os.path.join(rootdir, self.xpid) if not os.path.exists(self.basepath): self._logger.info('Creating log directory: %s', self.basepath) os.makedirs(self.basepath, exist_ok=True) else: self._logger.info('Found log directory: %s', self.basepath) # NOTE: remove latest because it creates errors when running on slurm # multiple jobs trying to write to latest but cannot find it # Add 'latest' as symlink unless it exists and is no symlink. # symlink = os.path.join(rootdir, 'latest') # if os.path.islink(symlink): # os.remove(symlink) # if not os.path.exists(symlink): # os.symlink(self.basepath, symlink) # self._logger.info('Symlinked log directory: %s', symlink) self.paths = dict( msg='{base}/out.log'.format(base=self.basepath), logs='{base}/logs.csv'.format(base=self.basepath), fields='{base}/fields.csv'.format(base=self.basepath), meta='{base}/meta.json'.format(base=self.basepath), ) self._logger.info('Saving arguments to %s', self.paths['meta']) if os.path.exists(self.paths['meta']): self._logger.warning('Path to meta file already exists. 
' 'Not overriding meta.') else: self._save_metadata() self._logger.info('Saving messages to %s', self.paths['msg']) if os.path.exists(self.paths['msg']): self._logger.warning('Path to message file already exists. ' 'New data will be appended.') fhandle = logging.FileHandler(self.paths['msg']) fhandle.setFormatter(formatter) self._logger.addHandler(fhandle) self._logger.info('Saving logs data to %s', self.paths['logs']) self._logger.info('Saving logs\' fields to %s', self.paths['fields']) if os.path.exists(self.paths['logs']): self._logger.warning('Path to log file already exists. ' 'New data will be appended.') with open(self.paths['fields'], 'r') as csvfile: reader = csv.reader(csvfile) self.fieldnames = list(reader)[0] else: self.fieldnames = ['_tick', '_time'] def log(self, to_log: Dict, tick: int = None, verbose: bool = False) -> None: if tick is not None: raise NotImplementedError else: to_log['_tick'] = self._tick self._tick += 1 to_log['_time'] = time.time() old_len = len(self.fieldnames) for k in to_log: if k not in self.fieldnames: self.fieldnames.append(k) if old_len != len(self.fieldnames): with open(self.paths['fields'], 'w') as csvfile: writer = csv.writer(csvfile) writer.writerow(self.fieldnames) self._logger.info('Updated log fields: %s', self.fieldnames) if to_log['_tick'] == 0: # print("\ncreating logs file ") with open(self.paths['logs'], 'a') as f: f.write('# %s\n' % ','.join(self.fieldnames)) if verbose: self._logger.info('LOG | %s', ', '.join( ['{}: {}'.format(k, to_log[k]) for k in sorted(to_log)])) with open(self.paths['logs'], 'a') as f: writer = csv.DictWriter(f, fieldnames=self.fieldnames) writer.writerow(to_log) # print("\nadded to log file") def close(self, successful: bool = True) -> None: self.metadata['date_end'] = datetime.datetime.now().strftime( '%Y-%m-%d %H:%M:%S.%f') self.metadata['successful'] = successful self._save_metadata() def _save_metadata(self) -> None: with open(self.paths['meta'], 'w') as jsonfile: json.dump(self.metadata, jsonfile, indent=4, sort_keys=True) class Model: """ The wrapper for the three models. We also wrap several interfaces such as share_memory, eval, etc. """ def __init__(self, device=0): self.models = {} self.models['landlord'] = LandlordLstmModel().to(torch.device('cuda:'+str(device))) self.models['landlord_up'] = FarmerLstmModel().to(torch.device('cuda:'+str(device))) self.models['landlord_down'] = FarmerLstmModel().to(torch.device('cuda:'+str(device))) def forward(self, position, z, x, training=False, flags=None): model = self.models[position] return model.forward(z, x, training, flags) def share_memory(self): self.models['landlord'].share_memory() self.models['landlord_up'].share_memory() self.models['landlord_down'].share_memory() def eval(self): self.models['landlord'].eval() self.models['landlord_up'].eval() self.models['landlord_down'].eval() def parameters(self, position): return self.models[position].parameters() def get_model(self, position): return self.models[position] def get_models(self): return self.models log = logging.getLogger('doudzero') log.propagate = False log.addHandler(shandle) log.setLevel(logging.INFO) def get_batch(free_queue, full_queue, buffers, flags, lock): """ This function will sample a batch from the buffers based on the indices received from the full queue. It will also free the indices by sending it to full_queue. 
""" with lock: indices = [full_queue.get() for _ in range(flags.batch_size)] batch = { key: torch.stack([buffers[key][m] for m in indices], dim=1) for key in buffers } for m in indices: free_queue.put(m) return batch def create_optimizers(flags, learner_model): """ Create three optimizers for the three positions """ positions = ['landlord', 'landlord_up', 'landlord_down'] optimizers = {} for position in positions: optimizer = torch.optim.RMSprop( learner_model.parameters(position), lr=flags.learning_rate, momentum=flags.momentum, eps=flags.epsilon, alpha=flags.alpha) optimizers[position] = optimizer return optimizers def create_buffers(flags): """ We create buffers for different positions as well as for different devices (i.e., GPU). That is, each device will have three buffers for the three positions. """ T = flags.unroll_length positions = ['landlord', 'landlord_up', 'landlord_down'] buffers = [] for device in range(torch.cuda.device_count()): buffers.append({}) for position in positions: x_dim = 319 if position == 'landlord' else 430 specs = dict( done=dict(size=(T,), dtype=torch.bool), episode_return=dict(size=(T,), dtype=torch.float32), target=dict(size=(T,), dtype=torch.float32), obs_x_no_action=dict(size=(T, x_dim), dtype=torch.int8), obs_action=dict(size=(T, 54), dtype=torch.int8), obs_z=dict(size=(T, 5, 162), dtype=torch.int8), ) _buffers: Buffers = {key: [] for key in specs} for _ in range(flags.num_buffers): for key in _buffers: _buffer = torch.empty(**specs[key]).to(torch.device('cuda:'+str(device))).share_memory_() _buffers[key].append(_buffer) buffers[device][position] = _buffers return buffers def act(i, device, free_queue, full_queue, model, buffers, flags): """ This function will run forever until we stop it. It will generate data from the environment and send the data to buffer. It uses a free queue and full queue to syncup with the main process. """ positions = ['landlord', 'landlord_up', 'landlord_down'] try: T = flags.unroll_length log.info('Device %i Actor %i started.', device, i) env = create_env(flags) env = Environment(env, device) done_buf = {p: [] for p in positions} episode_return_buf = {p: [] for p in positions} target_buf = {p: [] for p in positions} obs_x_no_action_buf = {p: [] for p in positions} obs_action_buf = {p: [] for p in positions} obs_z_buf = {p: [] for p in positions} size = {p: 0 for p in positions} position, obs, env_output = env.initial() while True: while True: obs_x_no_action_buf[position].append(env_output['obs_x_no_action']) obs_z_buf[position].append(env_output['obs_z']) with torch.no_grad(): agent_output = model.forward(position, obs['z_batch'], obs['x_batch'], flags=flags) _action_idx = int(agent_output['action'].cpu().detach().numpy()) action = obs['legal_actions'][_action_idx] obs_action_buf[position].append(_cards2tensor(action)) position, obs, env_output = env.step(action) size[position] += 1 if env_output['done']: for p in positions: diff = size[p] - len(target_buf[p]) if diff > 0: done_buf[p].extend([False for _ in range(diff-1)]) done_buf[p].append(True) episode_return = env_output['episode_return'] if p == 'landlord' else -env_output['episode_return'] episode_return_buf[p].extend([0.0 for _ in range(diff-1)]) episode_return_buf[p].append(episode_return) target_buf[p].extend([episode_return for _ in range(diff)]) break for p in positions: if size[p] > T: index = free_queue[p].get() if index is None: break for t in range(T): buffers[p]['done'][index][t, ...] = done_buf[p][t] buffers[p]['episode_return'][index][t, ...] 
= episode_return_buf[p][t] buffers[p]['target'][index][t, ...] = target_buf[p][t] buffers[p]['obs_x_no_action'][index][t, ...] = obs_x_no_action_buf[p][t] buffers[p]['obs_action'][index][t, ...] = obs_action_buf[p][t] buffers[p]['obs_z'][index][t, ...] = obs_z_buf[p][t] full_queue[p].put(index) done_buf[p] = done_buf[p][T:] episode_return_buf[p] = episode_return_buf[p][T:] target_buf[p] = target_buf[p][T:] obs_x_no_action_buf[p] = obs_x_no_action_buf[p][T:] obs_action_buf[p] = obs_action_buf[p][T:] obs_z_buf[p] = obs_z_buf[p][T:] size[p] -= T except KeyboardInterrupt: pass except Exception as e: log.error('Exception in worker process %i', i) traceback.print_exc() print() raise e The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(flags)` to solve the following problem: This is the main funtion for training. It will first initilize everything, such as buffers, optimizers, etc. Then it will start subprocesses as actors. Then, it will call learning function with multiple threads. Here is the function: def train(flags): """ This is the main funtion for training. It will first initilize everything, such as buffers, optimizers, etc. Then it will start subprocesses as actors. Then, it will call learning function with multiple threads. """ plogger = FileWriter( xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir, ) checkpointpath = os.path.expandvars( os.path.expanduser('%s/%s/%s' % (flags.savedir, flags.xpid, 'model.tar'))) T = flags.unroll_length B = flags.batch_size # Initialize actor models models = [] assert flags.num_actor_devices <= len(flags.gpu_devices.split(',')), 'The number of actor devices can not exceed the number of available devices' for device in range(flags.num_actor_devices): model = Model(device=device) model.share_memory() model.eval() models.append(model) # Initialize buffers buffers = create_buffers(flags) # Initialize queues actor_processes = [] ctx = mp.get_context('spawn') free_queue = [] full_queue = [] for device in range(flags.num_actor_devices): _free_queue = {'landlord': ctx.SimpleQueue(), 'landlord_up': ctx.SimpleQueue(), 'landlord_down': ctx.SimpleQueue()} _full_queue = {'landlord': ctx.SimpleQueue(), 'landlord_up': ctx.SimpleQueue(), 'landlord_down': ctx.SimpleQueue()} free_queue.append(_free_queue) full_queue.append(_full_queue) # Learner model for training learner_model = Model(device=flags.training_device) # Create optimizers optimizers = create_optimizers(flags, learner_model) # Stat Keys stat_keys = [ 'mean_episode_return_landlord', 'loss_landlord', 'mean_episode_return_landlord_up', 'loss_landlord_up', 'mean_episode_return_landlord_down', 'loss_landlord_down', ] frames, stats = 0, {k: 0 for k in stat_keys} position_frames = {'landlord':0, 'landlord_up':0, 'landlord_down':0} # Load models if any if flags.load_model and os.path.exists(checkpointpath): checkpoint_states = torch.load( checkpointpath, map_location="cuda:"+str(flags.training_device) ) for k in ['landlord', 'landlord_up', 'landlord_down']: learner_model.get_model(k).load_state_dict(checkpoint_states["model_state_dict"][k]) optimizers[k].load_state_dict(checkpoint_states["optimizer_state_dict"][k]) for device in range(flags.num_actor_devices): models[device].get_model(k).load_state_dict(learner_model.get_model(k).state_dict()) stats = checkpoint_states["stats"] frames = checkpoint_states["frames"] position_frames = checkpoint_states["position_frames"] log.info(f"Resuming preempted job, current 
stats:\n{stats}") # Starting actor processes for device in range(flags.num_actor_devices): num_actors = flags.num_actors for i in range(flags.num_actors): actor = ctx.Process( target=act, args=(i, device, free_queue[device], full_queue[device], models[device], buffers[device], flags)) actor.start() actor_processes.append(actor) def batch_and_learn(i, device, position, local_lock, position_lock, lock=threading.Lock()): """Thread target for the learning process.""" nonlocal frames, position_frames, stats while frames < flags.total_frames: batch = get_batch(free_queue[device][position], full_queue[device][position], buffers[device][position], flags, local_lock) _stats = learn(position, models, learner_model.get_model(position), batch, optimizers[position], flags, position_lock) with lock: for k in _stats: stats[k] = _stats[k] to_log = dict(frames=frames) to_log.update({k: stats[k] for k in stat_keys}) plogger.log(to_log) frames += T * B position_frames[position] += T * B for device in range(flags.num_actor_devices): for m in range(flags.num_buffers): free_queue[device]['landlord'].put(m) free_queue[device]['landlord_up'].put(m) free_queue[device]['landlord_down'].put(m) threads = [] locks = [{'landlord': threading.Lock(), 'landlord_up': threading.Lock(), 'landlord_down': threading.Lock()} for _ in range(flags.num_actor_devices)] position_locks = {'landlord': threading.Lock(), 'landlord_up': threading.Lock(), 'landlord_down': threading.Lock()} for device in range(flags.num_actor_devices): for i in range(flags.num_threads): for position in ['landlord', 'landlord_up', 'landlord_down']: thread = threading.Thread( target=batch_and_learn, name='batch-and-learn-%d' % i, args=(i,device,position,locks[device][position],position_locks[position])) thread.start() threads.append(thread) def checkpoint(frames): if flags.disable_checkpoint: return log.info('Saving checkpoint to %s', checkpointpath) _models = learner_model.get_models() torch.save({ 'model_state_dict': {k: _models[k].state_dict() for k in _models}, 'optimizer_state_dict': {k: optimizers[k].state_dict() for k in optimizers}, "stats": stats, 'flags': vars(flags), 'frames': frames, 'position_frames': position_frames }, checkpointpath) # Save the weights for evaluation purpose for position in ['landlord', 'landlord_up', 'landlord_down']: model_weights_dir = os.path.expandvars(os.path.expanduser( '%s/%s/%s' % (flags.savedir, flags.xpid, position+'_weights_'+str(frames)+'.ckpt'))) torch.save(learner_model.get_model(position).state_dict(), model_weights_dir) timer = timeit.default_timer try: last_checkpoint_time = timer() - flags.save_interval * 60 while frames < flags.total_frames: start_frames = frames position_start_frames = {k: position_frames[k] for k in position_frames} start_time = timer() time.sleep(5) if timer() - last_checkpoint_time > flags.save_interval * 60: checkpoint(frames) last_checkpoint_time = timer() end_time = timer() fps = (frames - start_frames) / (end_time - start_time) position_fps = {k:(position_frames[k]-position_start_frames[k])/(end_time-start_time) for k in position_frames} log.info('After %i (L:%i U:%i D:%i) frames: @ %.1f fps (L:%.1f U:%.1f D:%.1f) Stats:\n%s', frames, position_frames['landlord'], position_frames['landlord_up'], position_frames['landlord_down'], fps, position_fps['landlord'], position_fps['landlord_up'], position_fps['landlord_down'], pprint.pformat(stats)) except KeyboardInterrupt: return else: for thread in threads: thread.join() log.info('Learning finished after %d frames.', frames) 
checkpoint(frames) plogger.close()
This is the main function for training. It will first initialize everything, such as buffers, optimizers, etc. Then it will start subprocesses as actors. Then, it will call the learning function with multiple threads.
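`train(flags)` expects a flags namespace carrying every attribute referenced in the body above. The sketch below lists exactly those attributes; the values are illustrative defaults, not the project's official configuration:

from types import SimpleNamespace

flags = SimpleNamespace(
    xpid='douzero',                # experiment id used for log/checkpoint paths
    savedir='checkpoints',         # root directory for FileWriter and model.tar
    unroll_length=100,             # T: rollout length per buffer slot
    batch_size=32,                 # B: batches sampled per learn step
    total_frames=100_000_000,      # stop condition for training
    num_actors=5,                  # actor processes per device
    num_threads=4,                 # learner threads per device
    num_buffers=50,                # shared-memory buffer slots per position
    num_actor_devices=1,           # GPUs used for acting
    gpu_devices='0',               # comma-separated visible GPU ids
    training_device=0,             # GPU used by the learner model
    learning_rate=0.0001, momentum=0, epsilon=1e-5, alpha=0.99,
    max_grad_norm=40.0,
    save_interval=30,              # minutes between checkpoints
    load_model=False, disable_checkpoint=False,
)
# train(flags)  # launches actor processes and learner threads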
872
import random from rlcard.games.doudizhu.utils import CARD_TYPE INDEX = {'3': 0, '4': 1, '5': 2, '6': 3, '7': 4, '8': 5, '9': 6, 'T': 7, 'J': 8, 'Q': 9, 'K': 10, 'A': 11, '2': 12, 'B': 13, 'R': 14} def card_str2list(hand): hand_list = [0 for _ in range(15)] for card in hand: hand_list[INDEX[card]] += 1 return hand_list def list2card_str(hand_list): card_str = '' cards = [card for card in INDEX] for index, count in enumerate(hand_list): card_str += cards[index] * count return card_str def pick_chain(hand_list, count): chains = [] str_card = [card for card in INDEX] hand_list = [str(card) for card in hand_list] hand = ''.join(hand_list[:12]) chain_list = hand.split('0') add = 0 for index, chain in enumerate(chain_list): if len(chain) > 0: if len(chain) >= 5: start = index + add min_count = int(min(chain)) // count if min_count != 0: str_chain = '' for num in range(len(chain)): str_chain += str_card[start+num] hand_list[start+num] = int(hand_list[start+num]) - int(min(chain)) for _ in range(min_count): chains.append(str_chain) add += len(chain) hand_list = [int(card) for card in hand_list] return (chains, hand_list) The provided code snippet includes necessary dependencies for implementing the `combine_cards` function. Write a Python function `def combine_cards(hand)` to solve the following problem: Get optimal combinations of cards in hand Here is the function: def combine_cards(hand): '''Get optimal combinations of cards in hand ''' comb = {'rocket': [], 'bomb': [], 'trio': [], 'trio_chain': [], 'solo_chain': [], 'pair_chain': [], 'pair': [], 'solo': []} # 1. pick rocket if hand[-2:] == 'BR': comb['rocket'].append('BR') hand = hand[:-2] # 2. pick bomb hand_cp = hand for index in range(len(hand_cp) - 3): if hand_cp[index] == hand_cp[index+3]: bomb = hand_cp[index: index+4] comb['bomb'].append(bomb) hand = hand.replace(bomb, '') # 3. pick trio and trio_chain hand_cp = hand for index in range(len(hand_cp) - 2): if hand_cp[index] == hand_cp[index+2]: trio = hand_cp[index: index+3] if len(comb['trio']) > 0 and INDEX[trio[-1]] < 12 and (INDEX[trio[-1]]-1) == INDEX[comb['trio'][-1][-1]]: comb['trio'][-1] += trio else: comb['trio'].append(trio) hand = hand.replace(trio, '') only_trio = [] only_trio_chain = [] for trio in comb['trio']: if len(trio) == 3: only_trio.append(trio) else: only_trio_chain.append(trio) comb['trio'] = only_trio comb['trio_chain'] = only_trio_chain # 4. pick solo chain hand_list = card_str2list(hand) chains, hand_list = pick_chain(hand_list, 1) comb['solo_chain'] = chains # 5. pick par_chain chains, hand_list = pick_chain(hand_list, 2) comb['pair_chain'] = chains hand = list2card_str(hand_list) # 6. pick pair and solo index = 0 while index < len(hand) - 1: if hand[index] == hand[index+1]: comb['pair'].append(hand[index] + hand[index+1]) index += 2 else: comb['solo'].append(hand[index]) index += 1 if index == (len(hand) - 1): comb['solo'].append(hand[index]) return comb
Get optimal combinations of cards in hand
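A quick usage sketch for `combine_cards`; the 20-card hand below is arbitrary (written in the 3..2/B/R notation of INDEX, already sorted), and the expected split follows from the greedy picking order above (rocket, then bombs, trios, chains, pairs, solos):

comb = combine_cards('33345556789TJQKA22BR')
# expected, given the greedy order:
# rocket ['BR'], trio ['333', '555'], solo_chain ['6789TJQKA'],
# pair ['22'], solo ['4'], all other categories empty
print(comb)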
873
from douzero.env.game import GameEnv from .deep_agent import DeepAgent RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30} AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30] class GameEnv(object): def __init__(self, players): self.card_play_action_seq = [] self.three_landlord_cards = None self.game_over = False self.acting_player_position = None self.player_utility_dict = None self.players = players self.last_move_dict = {'landlord': [], 'landlord_up': [], 'landlord_down': []} self.played_cards = {'landlord': [], 'landlord_up': [], 'landlord_down': []} self.last_move = [] self.last_two_moves = [] self.num_wins = {'landlord': 0, 'farmer': 0} self.num_scores = {'landlord': 0, 'farmer': 0} self.info_sets = {'landlord': InfoSet('landlord'), 'landlord_up': InfoSet('landlord_up'), 'landlord_down': InfoSet('landlord_down')} self.bomb_num = 0 self.last_pid = 'landlord' self.winner = "" def card_play_init(self, card_play_data): self.info_sets['landlord'].player_hand_cards = \ card_play_data['landlord'] self.info_sets['landlord_up'].player_hand_cards = \ card_play_data['landlord_up'] self.info_sets['landlord_down'].player_hand_cards = \ card_play_data['landlord_down'] self.three_landlord_cards = card_play_data['three_landlord_cards'] self.get_acting_player_position() self.game_infoset = self.get_infoset() def game_done(self): if len(self.info_sets['landlord'].player_hand_cards) == 0 or \ len(self.info_sets['landlord_up'].player_hand_cards) == 0 or \ len(self.info_sets['landlord_down'].player_hand_cards) == 0: # if one of the three players discards his hand, # then game is over. 
self.compute_player_utility() self.update_num_wins_scores() self.game_over = True def compute_player_utility(self): if len(self.info_sets['landlord'].player_hand_cards) == 0: self.player_utility_dict = {'landlord': 2, 'farmer': -1} else: self.player_utility_dict = {'landlord': -2, 'farmer': 1} def update_num_wins_scores(self): for pos, utility in self.player_utility_dict.items(): base_score = 2 if pos == 'landlord' else 1 if utility > 0: self.num_wins[pos] += 1 self.winner = pos self.num_scores[pos] += base_score * (2 ** self.bomb_num) else: self.num_scores[pos] -= base_score * (2 ** self.bomb_num) def get_winner(self): return self.winner def get_bomb_num(self): return self.bomb_num def step(self, position, action=[]): ''' # 是玩家角色就调用act函数通过智能体获取action,否则通过玩家输入获取action if self.acting_player_position == self.players[0]: action, actions_confidence = self.players[1].act(self.game_infoset) # 计算胜率 win_rates = {} win_rate = max(actions_confidence, -1) win_rate = min(win_rate, 1) win_rate = str(round(float((win_rate + 1) / 2), 4)) print("你出牌: " + str([EnvCard2RealCard[c] for c in action]) + ", 预计胜率" + str( round(float(win_rate) * 100, 2)) + "%\n") else: try: action = [RealCard2EnvCard[c] for c in list(input("地主{}出牌:".format( "上家" if self.acting_player_position == "landlord_up" else "下家" if self.acting_player_position == "landlord_down" else "")))] print(action, end="\n\n") # “要不起”,返回空列表 except ValueError as e: action = [] ''' win_rate = 0 if self.acting_player_position == position: action, actions_confidence = self.players[1].act(self.game_infoset) # 计算胜率 win_rate = max(actions_confidence, -1) win_rate = min(win_rate, 1) win_rate = str(round(float((win_rate + 1) / 2), 4)) if len(action) > 0: self.last_pid = self.acting_player_position if action in bombs: self.bomb_num += 1 self.last_move_dict[ self.acting_player_position] = action.copy() self.card_play_action_seq.append(action) self.update_acting_player_hand_cards(action) self.played_cards[self.acting_player_position] += action if self.acting_player_position == 'landlord' and \ len(action) > 0 and \ len(self.three_landlord_cards) > 0: for card in action: if len(self.three_landlord_cards) > 0: if card in self.three_landlord_cards: self.three_landlord_cards.remove(card) else: break self.game_done() if not self.game_over: self.get_acting_player_position() self.game_infoset = self.get_infoset() # 返回动作和胜率,只有玩家角色会接受返回值 action_message = {"action": str(''.join([EnvCard2RealCard[c] for c in action])), "win_rate": str(round(float(win_rate) * 100, 2)) + "%"} return action_message def get_last_move(self): last_move = [] if len(self.card_play_action_seq) != 0: if len(self.card_play_action_seq[-1]) == 0: last_move = self.card_play_action_seq[-2] else: last_move = self.card_play_action_seq[-1] return last_move def get_last_two_moves(self): last_two_moves = [[], []] for card in self.card_play_action_seq[-2:]: last_two_moves.insert(0, card) last_two_moves = last_two_moves[:2] return last_two_moves def get_acting_player_position(self): if self.acting_player_position is None: self.acting_player_position = 'landlord' else: if self.acting_player_position == 'landlord': self.acting_player_position = 'landlord_down' elif self.acting_player_position == 'landlord_down': self.acting_player_position = 'landlord_up' else: self.acting_player_position = 'landlord' return self.acting_player_position # 更新手牌 def update_acting_player_hand_cards(self, action): if action != []: # 更新玩家手牌,删除对应的牌 if self.acting_player_position == self.players[0]: for card in action: 
self.info_sets[self.acting_player_position].player_hand_cards.remove(card) # 更新另外两个玩家手牌,删除相同数量的牌 else: del self.info_sets[self.acting_player_position].player_hand_cards[0:len(action)] self.info_sets[self.acting_player_position].player_hand_cards.sort() def get_legal_card_play_actions(self): mg = MovesGener( self.info_sets[self.acting_player_position].player_hand_cards) action_sequence = self.card_play_action_seq rival_move = [] if len(action_sequence) != 0: if len(action_sequence[-1]) == 0: rival_move = action_sequence[-2] else: rival_move = action_sequence[-1] rival_type = md.get_move_type(rival_move) rival_move_type = rival_type['type'] rival_move_len = rival_type.get('len', 1) moves = list() if rival_move_type == md.TYPE_0_PASS: moves = mg.gen_moves() elif rival_move_type == md.TYPE_1_SINGLE: all_moves = mg.gen_type_1_single() moves = ms.filter_type_1_single(all_moves, rival_move) elif rival_move_type == md.TYPE_2_PAIR: all_moves = mg.gen_type_2_pair() moves = ms.filter_type_2_pair(all_moves, rival_move) elif rival_move_type == md.TYPE_3_TRIPLE: all_moves = mg.gen_type_3_triple() moves = ms.filter_type_3_triple(all_moves, rival_move) elif rival_move_type == md.TYPE_4_BOMB: all_moves = mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb() moves = ms.filter_type_4_bomb(all_moves, rival_move) elif rival_move_type == md.TYPE_5_KING_BOMB: moves = [] elif rival_move_type == md.TYPE_6_3_1: all_moves = mg.gen_type_6_3_1() moves = ms.filter_type_6_3_1(all_moves, rival_move) elif rival_move_type == md.TYPE_7_3_2: all_moves = mg.gen_type_7_3_2() moves = ms.filter_type_7_3_2(all_moves, rival_move) elif rival_move_type == md.TYPE_8_SERIAL_SINGLE: all_moves = mg.gen_type_8_serial_single(repeat_num=rival_move_len) moves = ms.filter_type_8_serial_single(all_moves, rival_move) elif rival_move_type == md.TYPE_9_SERIAL_PAIR: all_moves = mg.gen_type_9_serial_pair(repeat_num=rival_move_len) moves = ms.filter_type_9_serial_pair(all_moves, rival_move) elif rival_move_type == md.TYPE_10_SERIAL_TRIPLE: all_moves = mg.gen_type_10_serial_triple(repeat_num=rival_move_len) moves = ms.filter_type_10_serial_triple(all_moves, rival_move) elif rival_move_type == md.TYPE_11_SERIAL_3_1: all_moves = mg.gen_type_11_serial_3_1(repeat_num=rival_move_len) moves = ms.filter_type_11_serial_3_1(all_moves, rival_move) elif rival_move_type == md.TYPE_12_SERIAL_3_2: all_moves = mg.gen_type_12_serial_3_2(repeat_num=rival_move_len) moves = ms.filter_type_12_serial_3_2(all_moves, rival_move) elif rival_move_type == md.TYPE_13_4_2: all_moves = mg.gen_type_13_4_2() moves = ms.filter_type_13_4_2(all_moves, rival_move) elif rival_move_type == md.TYPE_14_4_22: all_moves = mg.gen_type_14_4_22() moves = ms.filter_type_14_4_22(all_moves, rival_move) if rival_move_type not in [md.TYPE_0_PASS, md.TYPE_4_BOMB, md.TYPE_5_KING_BOMB]: moves = moves + mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb() if len(rival_move) != 0: # rival_move is not 'pass' moves = moves + [[]] for m in moves: m.sort() return moves def reset(self): self.card_play_action_seq = [] self.three_landlord_cards = None self.game_over = False self.acting_player_position = None self.player_utility_dict = None self.last_move_dict = {'landlord': [], 'landlord_up': [], 'landlord_down': []} self.played_cards = {'landlord': [], 'landlord_up': [], 'landlord_down': []} self.last_move = [] self.last_two_moves = [] self.info_sets = {'landlord': InfoSet('landlord'), 'landlord_up': InfoSet('landlord_up'), 'landlord_down': InfoSet('landlord_down')} self.bomb_num = 0 self.last_pid = 'landlord' def 
get_infoset(self): self.info_sets[ self.acting_player_position].last_pid = self.last_pid self.info_sets[ self.acting_player_position].legal_actions = \ self.get_legal_card_play_actions() self.info_sets[ self.acting_player_position].bomb_num = self.bomb_num self.info_sets[ self.acting_player_position].last_move = self.get_last_move() self.info_sets[ self.acting_player_position].last_two_moves = self.get_last_two_moves() self.info_sets[ self.acting_player_position].last_move_dict = self.last_move_dict self.info_sets[self.acting_player_position].num_cards_left_dict = \ {pos: len(self.info_sets[pos].player_hand_cards) for pos in ['landlord', 'landlord_up', 'landlord_down']} self.info_sets[self.acting_player_position].other_hand_cards = [] ''' 调整计算其他人手牌的方法,整副牌减去玩家手牌与出过的牌 for pos in ['landlord', 'landlord_up', 'landlord_down']: if pos != self.acting_player_position: self.info_sets[ self.acting_player_position].other_hand_cards += \ self.info_sets[pos].player_hand_cards ''' # 把出过的牌中三个子列表合成一个列表 played_cards_tmp = [] for i in list(self.played_cards.values()): played_cards_tmp.extend(i) # 出过的牌和玩家手上的牌 played_and_hand_cards = played_cards_tmp + self.info_sets[self.acting_player_position].player_hand_cards # 整副牌减去出过的牌和玩家手上的牌,就是其他人的手牌 for i in set(AllEnvCard): self.info_sets[ self.acting_player_position].other_hand_cards.extend([i] * (AllEnvCard.count(i) - played_and_hand_cards.count(i))) self.info_sets[self.acting_player_position].played_cards = \ self.played_cards self.info_sets[self.acting_player_position].three_landlord_cards = \ self.three_landlord_cards self.info_sets[self.acting_player_position].card_play_action_seq = \ self.card_play_action_seq self.info_sets[ self.acting_player_position].all_handcards = \ {pos: self.info_sets[pos].player_hand_cards for pos in ['landlord', 'landlord_up', 'landlord_down']} return deepcopy(self.info_sets[self.acting_player_position]) class DeepAgent: def __init__(self, position, model_path): self.model = _load_model(position, model_path) def act(self, infoset): # 只有一个合法动作时直接返回,这样会得不到胜率信息 # if len(infoset.legal_actions) == 1: # return infoset.legal_actions[0], 0 obs = get_obs(infoset) z_batch = torch.from_numpy(obs['z_batch']).float() x_batch = torch.from_numpy(obs['x_batch']).float() if torch.cuda.is_available(): z_batch, x_batch = z_batch.cuda(), x_batch.cuda() y_pred = self.model.forward(z_batch, x_batch, return_value=True)['values'] y_pred = y_pred.detach().cpu().numpy() best_action_index = np.argmax(y_pred, axis=0)[0] best_action = infoset.legal_actions[best_action_index] best_action_confidence = y_pred[best_action_index] # print(best_action, best_action_confidence, y_pred) return best_action, best_action_confidence def evaluate(landlord, landlord_up, landlord_down): # 输入玩家的牌 user_hand_cards_real = input("请输入你的手牌, 例如 333456789TJQKA2XD:") # user_hand_cards_real = "34666777899TJJKA22XD" use_hand_cards_env = [RealCard2EnvCard[c] for c in list(user_hand_cards_real)] # 输入玩家角色 user_position_code = int(input("请输入你的角色[0:地主上家, 1:地主, 2:地主下家]:")) # user_position_code = 1 user_position = ['landlord_up', 'landlord', 'landlord_down'][user_position_code] # 输入三张底牌 three_landlord_cards_real = input("请输入三张底牌, 例如 2XD:") # three_landlord_cards_real = "2XD" three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(three_landlord_cards_real)] # 整副牌减去玩家手上的牌,就是其他人的手牌,再分配给另外两个角色(如何分配对AI判断没有影响) other_hand_cards = [] for i in set(AllEnvCard): other_hand_cards.extend([i] * (AllEnvCard.count(i) - use_hand_cards_env.count(i))) card_play_data_list = [{}] card_play_data_list[0].update({ 
'three_landlord_cards': three_landlord_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(user_position_code + 0) % 3]: use_hand_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(user_position_code + 1) % 3]: other_hand_cards[0:17] if (user_position_code + 1) % 3 != 1 else other_hand_cards[17:], ['landlord_up', 'landlord', 'landlord_down'][(user_position_code + 2) % 3]: other_hand_cards[0:17] if (user_position_code + 1) % 3 == 1 else other_hand_cards[17:] }) # 生成手牌结束,校验手牌数量 if len(card_play_data_list[0]["three_landlord_cards"]) != 3: print("底牌必须是3张\n") return if len(card_play_data_list[0]["landlord_up"]) != 17 or \ len(card_play_data_list[0]["landlord_down"]) != 17 or \ len(card_play_data_list[0]["landlord"]) != 20: print("初始手牌数目有误\n") return # print(card_play_data_list) card_play_model_path_dict = { 'landlord': landlord, 'landlord_up': landlord_up, 'landlord_down': landlord_down} print("创建代表玩家的AI...") players = {} players[user_position] = DeepAgent(user_position, card_play_model_path_dict[user_position]) env = GameEnv(players) for idx, card_play_data in enumerate(card_play_data_list): env.card_play_init(card_play_data) print("开始出牌\n") while not env.game_over: env.step() print("{}胜,本局结束!\n".format("农民" if env.winner == "farmer" else "地主")) env.reset()
null
874
import torch
import numpy as np

from douzero.env.env import get_obs

# dependency snippet from douzero.dmc.models; LandlordLstmModel and
# FarmerLstmModel are defined in that module
model_dict = {}
model_dict['landlord'] = LandlordLstmModel
model_dict['landlord_up'] = FarmerLstmModel
model_dict['landlord_down'] = FarmerLstmModel

def _load_model(position, model_path):
    from douzero.dmc.models import model_dict
    model = model_dict[position]()
    model_state_dict = model.state_dict()
    if torch.cuda.is_available():
        pretrained = torch.load(model_path, map_location='cuda:0')
    else:
        pretrained = torch.load(model_path, map_location='cpu')
    # keep only the weights that exist in the current architecture
    pretrained = {k: v for k, v in pretrained.items() if k in model_state_dict}
    model_state_dict.update(pretrained)
    model.load_state_dict(model_state_dict)
    if torch.cuda.is_available():
        model.cuda()
    model.eval()
    return model
null
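A one-line usage sketch for `_load_model`; the checkpoint path is a placeholder:

# load the landlord network from a saved checkpoint (path is illustrative)
landlord_model = _load_model('landlord', 'baselines/landlord.ckpt')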
875
from douzero.env.utils import * import collections def is_continuous_seq(move): i = 0 while i < len(move) - 1: if move[i+1] - move[i] != 1: return False i += 1 return True def get_move_type(move): move_size = len(move) move_dict = collections.Counter(move) if move_size == 0: return {'type': TYPE_0_PASS} if move_size == 1: return {'type': TYPE_1_SINGLE, 'rank': move[0]} if move_size == 2: if move[0] == move[1]: return {'type': TYPE_2_PAIR, 'rank': move[0]} elif move == [20, 30]: # Kings return {'type': TYPE_5_KING_BOMB} else: return {'type': TYPE_15_WRONG} if move_size == 3: if len(move_dict) == 1: return {'type': TYPE_3_TRIPLE, 'rank': move[0]} else: return {'type': TYPE_15_WRONG} if move_size == 4: if len(move_dict) == 1: return {'type': TYPE_4_BOMB, 'rank': move[0]} elif len(move_dict) == 2: if move[0] == move[1] == move[2] or move[1] == move[2] == move[3]: return {'type': TYPE_6_3_1, 'rank': move[1]} else: return {'type': TYPE_15_WRONG} else: return {'type': TYPE_15_WRONG} if is_continuous_seq(move): return {'type': TYPE_8_SERIAL_SINGLE, 'rank': move[0], 'len': len(move)} if move_size == 5: if len(move_dict) == 2: return {'type': TYPE_7_3_2, 'rank': move[2]} else: return {'type': TYPE_15_WRONG} count_dict = collections.defaultdict(int) for c, n in move_dict.items(): count_dict[n] += 1 if move_size == 6: if (len(move_dict) == 2 or len(move_dict) == 3) and count_dict.get(4) == 1 and \ (count_dict.get(2) == 1 or count_dict.get(1) == 2): return {'type': TYPE_13_4_2, 'rank': move[2]} if move_size == 8 and (((len(move_dict) == 3 or len(move_dict) == 2) and (count_dict.get(4) == 1 and count_dict.get(2) == 2)) or count_dict.get(4) == 2): return {'type': TYPE_14_4_22, 'rank': max([c for c, n in move_dict.items() if n == 4])} mdkeys = sorted(move_dict.keys()) if len(move_dict) == count_dict.get(2) and is_continuous_seq(mdkeys): return {'type': TYPE_9_SERIAL_PAIR, 'rank': mdkeys[0], 'len': len(mdkeys)} if len(move_dict) == count_dict.get(3) and is_continuous_seq(mdkeys): return {'type': TYPE_10_SERIAL_TRIPLE, 'rank': mdkeys[0], 'len': len(mdkeys)} # Check Type 11 (serial 3+1) and Type 12 (serial 3+2) if count_dict.get(3, 0) >= MIN_TRIPLES: serial_3 = list() single = list() pair = list() for k, v in move_dict.items(): if v == 3: serial_3.append(k) elif v == 1: single.append(k) elif v == 2: pair.append(k) else: # no other possibilities return {'type': TYPE_15_WRONG} serial_3.sort() if is_continuous_seq(serial_3): if len(serial_3) == len(single)+len(pair)*2: return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3)} if len(serial_3) == len(pair) and len(move_dict) == len(serial_3) * 2: return {'type': TYPE_12_SERIAL_3_2, 'rank': serial_3[0], 'len': len(serial_3)} if len(serial_3) == 4: if is_continuous_seq(serial_3[1:]): return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[1], 'len': len(serial_3) - 1} if is_continuous_seq(serial_3[:-1]): return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3) - 1} return {'type': TYPE_15_WRONG}
null
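A few concrete calls to `get_move_type`, using DouZero's card encoding (3-14 for 3-A, 17 for 2, 20/30 for the jokers). The `TYPE_*` constants and `MIN_TRIPLES` come from `douzero.env.utils` via the star import above; the return shapes follow directly from the branches:

get_move_type([])               # {'type': TYPE_0_PASS}
get_move_type([17])             # a single 2: {'type': TYPE_1_SINGLE, 'rank': 17}
get_move_type([20, 30])         # both jokers: {'type': TYPE_5_KING_BOMB}
get_move_type([9, 9, 9, 9])     # {'type': TYPE_4_BOMB, 'rank': 9}
get_move_type([3, 4, 5, 6, 7])  # {'type': TYPE_8_SERIAL_SINGLE, 'rank': 3, 'len': 5}
get_move_type([5, 5, 5, 7, 7])  # {'type': TYPE_7_3_2, 'rank': 5}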
876
import itertools

def select(cards, num):
    return [list(i) for i in itertools.combinations(cards, num)]
null
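For example, picking every pair out of three cards:

select([3, 4, 5], 2)  # [[3, 4], [3, 5], [4, 5]]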
877
import collections

def common_handle(moves, rival_move):
    new_moves = list()
    for move in moves:
        if move[0] > rival_move[0]:
            new_moves.append(move)
    return new_moves

def filter_type_1_single(moves, rival_move):
    return common_handle(moves, rival_move)
null
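`common_handle` keeps only the moves whose first (lowest) card strictly outranks the rival's, which for singles is the entire comparison:

# rival played an 8; only strictly higher singles remain playable
filter_type_1_single([[3], [9], [14]], [8])  # [[9], [14]]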
878
import collections

def common_handle(moves, rival_move):
    # body restored from the identical helper in the sibling snippets:
    # keep only moves whose leading card outranks the rival's
    new_moves = list()
    for move in moves:
        if move[0] > rival_move[0]:
            new_moves.append(move)
    return new_moves

def filter_type_2_pair(moves, rival_move):
    return common_handle(moves, rival_move)
null
879
import collections

def common_handle(moves, rival_move):
    new_moves = list()
    for move in moves:
        if move[0] > rival_move[0]:
            new_moves.append(move)
    return new_moves

def filter_type_3_triple(moves, rival_move):
    return common_handle(moves, rival_move)
null
880
import collections

def common_handle(moves, rival_move):
    new_moves = list()
    for move in moves:
        if move[0] > rival_move[0]:
            new_moves.append(move)
    return new_moves

def filter_type_4_bomb(moves, rival_move):
    return common_handle(moves, rival_move)
null
881
import collections

def filter_type_6_3_1(moves, rival_move):
    rival_move.sort()
    rival_rank = rival_move[1]
    new_moves = list()
    for move in moves:
        move.sort()
        my_rank = move[1]
        if my_rank > rival_rank:
            new_moves.append(move)
    return new_moves
null
882
import collections

def filter_type_7_3_2(moves, rival_move):
    rival_move.sort()
    rival_rank = rival_move[2]
    new_moves = list()
    for move in moves:
        move.sort()
        my_rank = move[2]
        if my_rank > rival_rank:
            new_moves.append(move)
    return new_moves
null
883
import collections

def common_handle(moves, rival_move):
    new_moves = list()
    for move in moves:
        if move[0] > rival_move[0]:
            new_moves.append(move)
    return new_moves

def filter_type_8_serial_single(moves, rival_move):
    return common_handle(moves, rival_move)
null
884
import collections

def common_handle(moves, rival_move):
    # body restored from the identical helper in the sibling snippets:
    # keep only moves whose leading card outranks the rival's
    new_moves = list()
    for move in moves:
        if move[0] > rival_move[0]:
            new_moves.append(move)
    return new_moves

def filter_type_9_serial_pair(moves, rival_move):
    return common_handle(moves, rival_move)
null
885
import collections

def common_handle(moves, rival_move):
    new_moves = list()
    for move in moves:
        if move[0] > rival_move[0]:
            new_moves.append(move)
    return new_moves

def filter_type_10_serial_triple(moves, rival_move):
    return common_handle(moves, rival_move)
null
886
import collections

def filter_type_11_serial_3_1(moves, rival_move):
    rival = collections.Counter(rival_move)
    rival_rank = max([k for k, v in rival.items() if v == 3])
    new_moves = list()
    for move in moves:
        mymove = collections.Counter(move)
        my_rank = max([k for k, v in mymove.items() if v == 3])
        if my_rank > rival_rank:
            new_moves.append(move)
    return new_moves
null
887
import collections

def filter_type_12_serial_3_2(moves, rival_move):
    rival = collections.Counter(rival_move)
    rival_rank = max([k for k, v in rival.items() if v == 3])
    new_moves = list()
    for move in moves:
        mymove = collections.Counter(move)
        my_rank = max([k for k, v in mymove.items() if v == 3])
        if my_rank > rival_rank:
            new_moves.append(move)
    return new_moves
null
888
import collections

def filter_type_13_4_2(moves, rival_move):
    rival_move.sort()
    rival_rank = rival_move[2]
    new_moves = list()
    for move in moves:
        move.sort()
        my_rank = move[2]
        if my_rank > rival_rank:
            new_moves.append(move)
    return new_moves
null
889
import collections

def filter_type_14_4_22(moves, rival_move):
    rival = collections.Counter(rival_move)
    rival_rank = my_rank = 0
    for k, v in rival.items():
        if v == 4:
            rival_rank = k
    new_moves = list()
    for move in moves:
        mymove = collections.Counter(move)
        for k, v in mymove.items():
            if v == 4:
                my_rank = k
        if my_rank > rival_rank:
            new_moves.append(move)
    return new_moves
null
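Here each move is a quad plus two pairs, and only the rank of the quad matters for the comparison:

# rival: 7777 + 33 + 44; a quad of 9 beats it, a quad of 6 does not
filter_type_14_4_22([[6, 6, 6, 6, 8, 8, 10, 10], [9, 9, 9, 9, 3, 3, 4, 4]],
                    [7, 7, 7, 7, 3, 3, 4, 4])
# -> [[9, 9, 9, 9, 3, 3, 4, 4]]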
890
from collections import Counter
import numpy as np
from douzero.env.game import GameEnv

def _get_obs_landlord(infoset):
    """
    Obtain the landlord features. See Table 4 in
    https://arxiv.org/pdf/2106.06135.pdf
    """
    num_legal_actions = len(infoset.legal_actions)
    my_handcards = _cards2array(infoset.player_hand_cards)
    my_handcards_batch = np.repeat(my_handcards[np.newaxis, :], num_legal_actions, axis=0)

    other_handcards = _cards2array(infoset.other_hand_cards)
    other_handcards_batch = np.repeat(other_handcards[np.newaxis, :], num_legal_actions, axis=0)

    last_action = _cards2array(infoset.last_move)
    last_action_batch = np.repeat(last_action[np.newaxis, :], num_legal_actions, axis=0)

    my_action_batch = np.zeros(my_handcards_batch.shape)
    for j, action in enumerate(infoset.legal_actions):
        my_action_batch[j, :] = _cards2array(action)

    landlord_up_num_cards_left = _get_one_hot_array(infoset.num_cards_left_dict['landlord_up'], 17)
    landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left[np.newaxis, :], num_legal_actions, axis=0)

    landlord_down_num_cards_left = _get_one_hot_array(infoset.num_cards_left_dict['landlord_down'], 17)
    landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left[np.newaxis, :], num_legal_actions, axis=0)

    landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up'])
    landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.newaxis, :], num_legal_actions, axis=0)

    landlord_down_played_cards = _cards2array(infoset.played_cards['landlord_down'])
    landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards[np.newaxis, :], num_legal_actions, axis=0)

    bomb_num = _get_one_hot_bomb(infoset.bomb_num)
    bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions, axis=0)

    x_batch = np.hstack((my_handcards_batch,
                         other_handcards_batch,
                         last_action_batch,
                         landlord_up_played_cards_batch,
                         landlord_down_played_cards_batch,
                         landlord_up_num_cards_left_batch,
                         landlord_down_num_cards_left_batch,
                         bomb_num_batch,
                         my_action_batch))
    x_no_action = np.hstack((my_handcards,
                             other_handcards,
                             last_action,
                             landlord_up_played_cards,
                             landlord_down_played_cards,
                             landlord_up_num_cards_left,
                             landlord_down_num_cards_left,
                             bomb_num))
    z = _action_seq_list2array(_process_action_seq(infoset.card_play_action_seq))
    z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
    obs = {
        'position': 'landlord',
        'x_batch': x_batch.astype(np.float32),
        'z_batch': z_batch.astype(np.float32),
        'legal_actions': infoset.legal_actions,
        'x_no_action': x_no_action.astype(np.int8),
        'z': z.astype(np.int8),
    }
    return obs

def _get_obs_landlord_up(infoset):
    """
    Obtain the landlord_up features. See Table 5 in
    https://arxiv.org/pdf/2106.06135.pdf
    """
    num_legal_actions = len(infoset.legal_actions)
    my_handcards = _cards2array(infoset.player_hand_cards)
    my_handcards_batch = np.repeat(my_handcards[np.newaxis, :], num_legal_actions, axis=0)

    other_handcards = _cards2array(infoset.other_hand_cards)
    other_handcards_batch = np.repeat(other_handcards[np.newaxis, :], num_legal_actions, axis=0)

    last_action = _cards2array(infoset.last_move)
    last_action_batch = np.repeat(last_action[np.newaxis, :], num_legal_actions, axis=0)

    my_action_batch = np.zeros(my_handcards_batch.shape)
    for j, action in enumerate(infoset.legal_actions):
        my_action_batch[j, :] = _cards2array(action)

    last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])
    last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis, :], num_legal_actions, axis=0)

    landlord_num_cards_left = _get_one_hot_array(infoset.num_cards_left_dict['landlord'], 20)
    landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.newaxis, :], num_legal_actions, axis=0)

    landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
    landlord_played_cards_batch = np.repeat(landlord_played_cards[np.newaxis, :], num_legal_actions, axis=0)

    last_teammate_action = _cards2array(infoset.last_move_dict['landlord_down'])
    last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis, :], num_legal_actions, axis=0)

    teammate_num_cards_left = _get_one_hot_array(infoset.num_cards_left_dict['landlord_down'], 17)
    teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.newaxis, :], num_legal_actions, axis=0)

    teammate_played_cards = _cards2array(infoset.played_cards['landlord_down'])
    teammate_played_cards_batch = np.repeat(teammate_played_cards[np.newaxis, :], num_legal_actions, axis=0)

    bomb_num = _get_one_hot_bomb(infoset.bomb_num)
    bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions, axis=0)

    x_batch = np.hstack((my_handcards_batch,
                         other_handcards_batch,
                         landlord_played_cards_batch,
                         teammate_played_cards_batch,
                         last_action_batch,
                         last_landlord_action_batch,
                         last_teammate_action_batch,
                         landlord_num_cards_left_batch,
                         teammate_num_cards_left_batch,
                         bomb_num_batch,
                         my_action_batch))
    x_no_action = np.hstack((my_handcards,
                             other_handcards,
                             landlord_played_cards,
                             teammate_played_cards,
                             last_action,
                             last_landlord_action,
                             last_teammate_action,
                             landlord_num_cards_left,
                             teammate_num_cards_left,
                             bomb_num))
    z = _action_seq_list2array(_process_action_seq(infoset.card_play_action_seq))
    z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
    obs = {
        'position': 'landlord_up',
        'x_batch': x_batch.astype(np.float32),
        'z_batch': z_batch.astype(np.float32),
        'legal_actions': infoset.legal_actions,
        'x_no_action': x_no_action.astype(np.int8),
        'z': z.astype(np.int8),
    }
    return obs

def _get_obs_landlord_down(infoset):
    """
    Obtain the landlord_down features. See Table 5 in
    https://arxiv.org/pdf/2106.06135.pdf
    """
    num_legal_actions = len(infoset.legal_actions)
    my_handcards = _cards2array(infoset.player_hand_cards)
    my_handcards_batch = np.repeat(my_handcards[np.newaxis, :], num_legal_actions, axis=0)

    other_handcards = _cards2array(infoset.other_hand_cards)
    other_handcards_batch = np.repeat(other_handcards[np.newaxis, :], num_legal_actions, axis=0)

    last_action = _cards2array(infoset.last_move)
    last_action_batch = np.repeat(last_action[np.newaxis, :], num_legal_actions, axis=0)

    my_action_batch = np.zeros(my_handcards_batch.shape)
    for j, action in enumerate(infoset.legal_actions):
        my_action_batch[j, :] = _cards2array(action)

    last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])
    last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis, :], num_legal_actions, axis=0)

    landlord_num_cards_left = _get_one_hot_array(infoset.num_cards_left_dict['landlord'], 20)
    landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.newaxis, :], num_legal_actions, axis=0)

    landlord_played_cards = _cards2array(infoset.played_cards['landlord'])
    landlord_played_cards_batch = np.repeat(landlord_played_cards[np.newaxis, :], num_legal_actions, axis=0)

    last_teammate_action = _cards2array(infoset.last_move_dict['landlord_up'])
    last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis, :], num_legal_actions, axis=0)

    teammate_num_cards_left = _get_one_hot_array(infoset.num_cards_left_dict['landlord_up'], 17)
    teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.newaxis, :], num_legal_actions, axis=0)

    teammate_played_cards = _cards2array(infoset.played_cards['landlord_up'])
    teammate_played_cards_batch = np.repeat(teammate_played_cards[np.newaxis, :], num_legal_actions, axis=0)

    bomb_num = _get_one_hot_bomb(infoset.bomb_num)
    bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions, axis=0)

    x_batch = np.hstack((my_handcards_batch,
                         other_handcards_batch,
                         landlord_played_cards_batch,
                         teammate_played_cards_batch,
                         last_action_batch,
                         last_landlord_action_batch,
                         last_teammate_action_batch,
                         landlord_num_cards_left_batch,
                         teammate_num_cards_left_batch,
                         bomb_num_batch,
                         my_action_batch))
    x_no_action = np.hstack((my_handcards,
                             other_handcards,
                             landlord_played_cards,
                             teammate_played_cards,
                             last_action,
                             last_landlord_action,
                             last_teammate_action,
                             landlord_num_cards_left,
                             teammate_num_cards_left,
                             bomb_num))
    z = _action_seq_list2array(_process_action_seq(infoset.card_play_action_seq))
    z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)
    obs = {
        'position': 'landlord_down',
        'x_batch': x_batch.astype(np.float32),
        'z_batch': z_batch.astype(np.float32),
        'legal_actions': infoset.legal_actions,
        'x_no_action': x_no_action.astype(np.int8),
        'z': z.astype(np.int8),
    }
    return obs

The provided code snippet includes necessary dependencies for implementing the `get_obs` function. Write a Python function `def get_obs(infoset)` to solve the following problem: This function obtains observations with imperfect information from the infoset. It has three branches since we encode different features for different positions. This function will return a dictionary named `obs`. It contains several fields. These fields will be used to train the model. One can play with those features to improve the performance. `position` is a string that can be landlord/landlord_down/landlord_up. `x_batch` is a batch of features (excluding the historical moves); it also encodes the action feature. `z_batch` is a batch of features with historical moves only. `legal_actions` is the legal moves. `x_no_action`: the features (excluding the historical moves and the action features); it does not have the batch dim. `z`: same as z_batch but not a batch.

Here is the function:

def get_obs(infoset):
    """
    This function obtains observations with imperfect information
    from the infoset. It has three branches since we encode
    different features for different positions.

    This function will return a dictionary named `obs`. It contains
    several fields. These fields will be used to train the model.
    One can play with those features to improve the performance.

    `position` is a string that can be landlord/landlord_down/landlord_up

    `x_batch` is a batch of features (excluding the historical moves).
    It also encodes the action feature.

    `z_batch` is a batch of features with historical moves only.

    `legal_actions` is the legal moves

    `x_no_action`: the features (excluding the historical moves and
    the action features). It does not have the batch dim.

    `z`: same as z_batch but not a batch.
    """
    if infoset.player_position == 'landlord':
        return _get_obs_landlord(infoset)
    elif infoset.player_position == 'landlord_up':
        return _get_obs_landlord_up(infoset)
    elif infoset.player_position == 'landlord_down':
        return _get_obs_landlord_down(infoset)
    else:
        raise ValueError(f'Unknown player position: {infoset.player_position}')
This function obtains observations with imperfect information from the infoset. It has three branches since we encode different features for different positions. This function will return a dictionary named `obs`. It contains several fields. These fields will be used to train the model. One can play with those features to improve the performance. `position` is a string that can be landlord/landlord_down/landlord_up. `x_batch` is a batch of features (excluding the historical moves); it also encodes the action feature. `z_batch` is a batch of features with historical moves only. `legal_actions` is the legal moves. `x_no_action`: the features (excluding the historical moves and the action features); it does not have the batch dim. `z`: same as z_batch but not a batch.
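The three `_get_obs_*` builders above call encoders (`_cards2array`, `_get_one_hot_array`, `_get_one_hot_bomb`, `_action_seq_list2array`, `_process_action_seq`) that this record does not include. As a rough illustration of the encoding style, here is a minimal sketch of a one-hot "cards left" encoder in the spirit of `_get_one_hot_array`; the real DouZero helper may differ in detail.

import numpy as np

# Minimal sketch, not DouZero's verbatim code: one-hot encode how many cards
# a player still holds, using max_num_cards slots (17 for peasants, 20 for landlord).
def one_hot_cards_left(num_left_cards: int, max_num_cards: int) -> np.ndarray:
    one_hot = np.zeros(max_num_cards)
    one_hot[num_left_cards - 1] = 1
    return one_hot

print(one_hot_cards_left(3, 17))   # 1.0 at index 2, zeros elsewhere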
891
from datetime import datetime  # needed by log below

from lib.email import send_email
from lib.db import create_user, find_user
from lib.log import log
from lib.slack import post_slack_message

def send_email(name: str, address: str, subject: str, body: str):
    print(f"Sending email to {name} ({address})")
    print("==========")
    print(f"Subject: {subject}\n")
    print(body)

def find_user(email: str):
    for user in users:
        if user.email == email:
            return user
    raise Exception(f"User with email address {email} not found.")

def log(msg: str):
    print(f"{datetime.now()} - {msg}")

def post_slack_message(channel: str, msg: str):
    print(f"[SlackBot - {channel}]: {msg}")

def upgrade_plan(email: str):
    # find the user
    user = find_user(email)

    # upgrade the plan
    user.plan = "paid"

    # post a Slack message to sales department
    post_slack_message("sales", f"{user.name} has upgraded their plan.")

    # send a thank you email
    send_email(user.name, user.email, "Thank you",
               f"Thanks for upgrading, {user.name}! You're gonna love it. \nRegards, The DevNotes team")

    # write server log
    log(f"User with email address {user.email} has upgraded their plan")
null
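Records 891-893 lean on a `lib.db` module whose `User` class and `users` store are never shown. A minimal sketch consistent with how the snippets use them might look as follows; everything beyond the attributes the snippets touch is an assumption.

# Hypothetical sketch of the assumed lib/db module.
from typing import List

class User:
    def __init__(self, name: str, password: str, email: str):
        self.name = name
        self.password = password
        self.email = email
        self.plan = "basic"      # upgraded to "paid" by upgrade_plan
        self.reset_code = ""     # set by password_forgotten

users: List[User] = []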
892
from datetime import datetime  # needed by log below

from lib.email import send_email
from lib.db import create_user, find_user
from lib.log import log
from lib.slack import post_slack_message
from lib.stringtools import get_random_string

def send_email(name: str, address: str, subject: str, body: str):
    print(f"Sending email to {name} ({address})")
    print("==========")
    print(f"Subject: {subject}\n")
    print(body)

def create_user(name: str, password: str, email: str):
    print(f"DB: creating user database entry for {name} ({email}).")
    new_user = User(name, password, email)
    users.append(new_user)
    return new_user

def log(msg: str):
    print(f"{datetime.now()} - {msg}")

def post_slack_message(channel: str, msg: str):
    print(f"[SlackBot - {channel}]: {msg}")

def register_new_user(name: str, password: str, email: str):
    # create an entry in the database
    user = create_user(name, password, email)

    # post a Slack message to sales department
    post_slack_message("sales",
        f"{user.name} has registered with email address {user.email}. Please spam this person incessantly.")

    # send a welcome email
    send_email(user.name, user.email, "Welcome",
               f"Thanks for registering, {user.name}!\nRegards, The DevNotes team")

    # write server log
    log(f"User registered with email address {user.email}")
null
893
import random  # needed by get_random_string below
import string
from datetime import datetime  # needed by log below

from lib.email import send_email
from lib.db import create_user, find_user
from lib.log import log
from lib.slack import post_slack_message
from lib.stringtools import get_random_string

def send_email(name: str, address: str, subject: str, body: str):
    print(f"Sending email to {name} ({address})")
    print("==========")
    print(f"Subject: {subject}\n")
    print(body)

def find_user(email: str):
    for user in users:
        if user.email == email:
            return user
    raise Exception(f"User with email address {email} not found.")

def log(msg: str):
    print(f"{datetime.now()} - {msg}")

def get_random_string(length):
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(length))

def password_forgotten(email: str):
    # retrieve the user
    user = find_user(email)

    # generate a password reset code
    user.reset_code = get_random_string(16)

    # send a password reset message
    send_email(user.name, user.email, "Reset your password",
               f"To reset your password, use this very secure code: {user.reset_code}.\nRegards, The DevNotes team")

    # write server log
    log(f"User with email address {user.email} requested a password reset")
null
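Tying the three tightly coupled flows together, a hypothetical driver (assuming the snippets from records 891-893 share one module along with the lib/db sketch above) would read:

# Hypothetical driver; each call mixes business logic with Slack, email and logging.
register_new_user("Bob", "s3cret", "bob@example.com")   # DB entry + Slack + email + log
password_forgotten("bob@example.com")                   # reset code + email + log
upgrade_plan("bob@example.com")                         # plan change + Slack + email + log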
894
from lib.slack import post_slack_message
from .event import subscribe

def handle_user_registered_event(user):
    post_slack_message("sales",
        f"{user.name} has registered with email address {user.email}. Please spam this person incessantly.")

def handle_user_upgrade_plan_event(user):
    post_slack_message("sales", f"{user.name} has upgraded their plan.")

def subscribe(event_type: str, fn):
    subscribers[event_type].append(fn)

def setup_slack_event_handlers():
    subscribe("user_registered", handle_user_registered_event)
    subscribe("user_upgrade_plan", handle_user_upgrade_plan_event)
null
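From this record on, the snippets import `subscribe` and `post_event` from a local `.event` module that is never shown. A minimal sketch consistent with their usage is below; the `subscribers` store is an assumption, inferred from the function bodies.

# Hypothetical .event module backing records 894-900.
from collections import defaultdict
from typing import Any, Callable, DefaultDict, List

subscribers: DefaultDict[str, List[Callable[[Any], None]]] = defaultdict(list)

def subscribe(event_type: str, fn: Callable[[Any], None]) -> None:
    subscribers[event_type].append(fn)

def post_event(event_type: str, data: Any) -> None:
    if event_type not in subscribers:
        return
    for fn in subscribers[event_type]:
        fn(data)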
895
from lib.db import create_user, find_user
from .event import post_event

def find_user(email: str):
    for user in users:
        if user.email == email:
            return user
    raise Exception(f"User with email address {email} not found.")

def post_event(event_type: str, data):
    if event_type not in subscribers:
        return
    for fn in subscribers[event_type]:
        fn(data)

def upgrade_plan(email: str):
    # find the user
    user = find_user(email)

    # upgrade the plan
    user.plan = "paid"

    # post an event
    post_event("user_upgrade_plan", user)
null
896
from datetime import datetime  # needed by log below

from lib.log import log
from .event import subscribe

def handle_user_registered_event(user):
    log(f"User registered with email address {user.email}")

def handle_user_password_forgotten_event(user):
    log(f"User with email address {user.email} requested a password reset")

def handle_user_upgrade_plan_event(user):
    log(f"User with email address {user.email} has upgraded their plan")

def log(msg: str):
    print(f"{datetime.now()} - {msg}")

def subscribe(event_type: str, fn):
    subscribers[event_type].append(fn)

def setup_log_event_handlers():
    subscribe("user_registered", handle_user_registered_event)
    subscribe("user_password_forgotten", handle_user_password_forgotten_event)
    subscribe("user_upgrade_plan", handle_user_upgrade_plan_event)
null
897
from lib.db import create_user, find_user
from lib.stringtools import get_random_string
from .event import post_event

def create_user(name: str, password: str, email: str):
    print(f"DB: creating user database entry for {name} ({email}).")
    new_user = User(name, password, email)
    users.append(new_user)
    return new_user

def post_event(event_type: str, data):
    if event_type not in subscribers:
        return
    for fn in subscribers[event_type]:
        fn(data)

def register_new_user(name: str, password: str, email: str):
    # create an entry in the database
    user = create_user(name, password, email)

    # post an event
    post_event("user_registered", user)
null
898
import random  # needed by get_random_string below
import string

from lib.db import create_user, find_user
from lib.stringtools import get_random_string
from .event import post_event

def find_user(email: str):
    for user in users:
        if user.email == email:
            return user
    raise Exception(f"User with email address {email} not found.")

def get_random_string(length):
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(length))

def post_event(event_type: str, data):
    if event_type not in subscribers:
        return
    for fn in subscribers[event_type]:
        fn(data)

def password_forgotten(email: str):
    # retrieve the user
    user = find_user(email)

    # generate a password reset code
    user.reset_code = get_random_string(16)

    # post an event
    post_event("user_password_forgotten", user)
null
899
from lib.db import create_user, find_user
from lib.stringtools import get_random_string
from .event import post_event

def find_user(email: str):
    for user in users:
        if user.email == email:
            return user
    raise Exception(f"User with email address {email} not found.")

def reset_password(code: str, email: str, password: str):
    # retrieve the user
    user = find_user(email)

    # reset the password
    user.reset_password(code, password)
null
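`reset_password` above delegates to a `User.reset_password` method the records never define. One plausible sketch, assuming a reset succeeds only when the supplied code matches the stored `reset_code`, is:

# Hypothetical method on the User sketch from earlier; the semantics are assumed.
class User:
    # ... constructor as in the earlier lib/db sketch ...
    def reset_password(self, code: str, password: str) -> None:
        if code != self.reset_code:
            raise Exception("Invalid reset code.")
        self.password = password
        self.reset_code = ""   # a reset code should only work once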
900
from lib.email import send_email
from .event import subscribe

def handle_user_registered_event(user):
    # send a welcome email
    send_email(user.name, user.email, "Welcome",
               f"Thanks for registering, {user.name}!\nRegards, The DevNotes team")

def handle_user_password_forgotten_event(user):
    # send a password reset message
    send_email(user.name, user.email, "Reset your password",
               f"To reset your password, use this very secure code: {user.reset_code}.\nRegards, The DevNotes team")

def handle_user_upgrade_plan_event(user):
    # send a thank you email
    send_email(user.name, user.email, "Thank you",
               f"Thanks for upgrading, {user.name}! You're gonna love it. \nRegards, The DevNotes team")

def subscribe(event_type: str, fn):
    subscribers[event_type].append(fn)

def setup_email_event_handlers():
    subscribe("user_registered", handle_user_registered_event)
    subscribe("user_password_forgotten", handle_user_password_forgotten_event)
    subscribe("user_upgrade_plan", handle_user_upgrade_plan_event)
null
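Putting the observer-based pieces together, a hypothetical entry point (assuming the setup functions and workflows above live in importable modules) could look like this; it shows how the workflows stay ignorant of who is listening.

# Hypothetical entry point for records 894-900.
setup_slack_event_handlers()
setup_log_event_handlers()
setup_email_event_handlers()

register_new_user("Carol", "pa55word", "carol@example.com")
# post_event("user_registered", ...) fans out to the Slack, log and email
# handlers without register_new_user knowing any of them exist.
upgrade_plan("carol@example.com")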
901
from dataclasses import dataclass, field
import string
import random
from typing import List, Callable

def generate_id(length: int = 8) -> str:
    # helper function for generating an id
    return ''.join(random.choices(string.ascii_uppercase, k=length))
null
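Since `generate_id` draws from the module-level RNG, its output varies per call. A hypothetical usage:

# Output differs on every run unless the RNG is seeded first.
print(generate_id())     # e.g. 'QHZRWXKB' (8 uppercase letters)
print(generate_id(4))    # e.g. 'TMAP'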