Dataset fields:
  code        string, length 66 to 870k
  docstring   string, length 19 to 26.7k
  func_name   string, length 1 to 138
  language    string, 1 distinct value
  repo        string, length 7 to 68
  path        string, length 5 to 324
  url         string, length 46 to 389
  license     string, 7 distinct values
def list_models(path):
    """
    Lists all model files in the given directory.

    Args:
        path (str): The directory path to search for model files.

    Returns:
        list: A list of model file paths.
    """
    nonlocal current_model_dir
    current_model_dir = path
    return list(list_files(path, exts=[".safetensors"], all=True))
Lists all model files in the given directory. Args: path (str): The directory path to search for model files. Returns: list: A list of model file paths.
list_models
python
bmaltais/kohya_ss
kohya_gui/convert_lcm_gui.py
https://github.com/bmaltais/kohya_ss/blob/master/kohya_gui/convert_lcm_gui.py
Apache-2.0
def list_save_to(path):
    """
    Lists all save-to options for the given directory.

    Args:
        path (str): The directory path to search for save-to options.

    Returns:
        list: A list of save-to options.
    """
    nonlocal current_save_dir
    current_save_dir = path
    return list(list_files(path, exts=[".safetensors"], all=True))
Lists all save-to options for the given directory. Args: path (str): The directory path to search for save-to options. Returns: list: A list of save-to options.
list_save_to
python
bmaltais/kohya_ss
kohya_gui/convert_lcm_gui.py
https://github.com/bmaltais/kohya_ss/blob/master/kohya_gui/convert_lcm_gui.py
Apache-2.0
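Both functions above call a list_files helper from the surrounding GUI module that is not shown in this excerpt. The following is only a sketch of what such a helper might look like, assuming it walks the directory and filters by extension; the actual kohya_gui implementation may differ.

import os

def list_files(path, exts=None, all=False):
    # Hypothetical helper: walk `path` and yield files whose extension is in `exts`.
    # `all=True` is assumed here to mean "include files from subdirectories as well".
    for root, _, files in os.walk(path):
        for name in files:
            if exts is None or os.path.splitext(name)[1] in exts:
                yield os.path.join(root, name)
        if not all:
            break  # only the top-level directory when all=False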
def _get_caption_path(image_file, images_dir, caption_ext):
    """
    Returns the expected path of a caption file for a given image path
    """
    caption_file_name = os.path.splitext(image_file)[0] + caption_ext
    caption_file_path = os.path.join(images_dir, caption_file_name)
    return caption_file_path
Returns the expected path of a caption file for a given image path
_get_caption_path
python
bmaltais/kohya_ss
kohya_gui/manual_caption_gui.py
https://github.com/bmaltais/kohya_ss/blob/master/kohya_gui/manual_caption_gui.py
Apache-2.0
def _get_quick_tags(quick_tags_text):
    """
    Gets a list of tags from the quick tags text box
    """
    quick_tags = [t.strip() for t in quick_tags_text.split(",") if t.strip()]
    quick_tags_set = set(quick_tags)
    return quick_tags, quick_tags_set
Gets a list of tags from the quick tags text box
_get_quick_tags
python
bmaltais/kohya_ss
kohya_gui/manual_caption_gui.py
https://github.com/bmaltais/kohya_ss/blob/master/kohya_gui/manual_caption_gui.py
Apache-2.0
def _get_tag_checkbox_updates(caption, quick_tags, quick_tags_set):
    """
    Updates a list of caption checkboxes to show possible tags and tags
    already included in the caption
    """
    caption_tags_have = [c.strip() for c in caption.split(",") if c.strip()]
    caption_tags_unique = [t for t in caption_tags_have if t not in quick_tags_set]
    caption_tags_all = quick_tags + caption_tags_unique
    return gr.CheckboxGroup(choices=caption_tags_all, value=caption_tags_have)
Updates a list of caption checkboxes to show possible tags and tags already included in the caption
_get_tag_checkbox_updates
python
bmaltais/kohya_ss
kohya_gui/manual_caption_gui.py
https://github.com/bmaltais/kohya_ss/blob/master/kohya_gui/manual_caption_gui.py
Apache-2.0
def import_tags_from_captions(
    images_dir, caption_ext, quick_tags_text, ignore_load_tags_word_count
):
    """
    Scans images directory for all available captions and loads all tags
    under a specified word count into the quick tags box
    """

    def empty_return():
        return gr.Text()

    # Check for images_dir
    if not images_dir:
        msgbox("Image folder is missing...")
        return empty_return()

    if not os.path.exists(images_dir):
        msgbox("Image folder does not exist...")
        return empty_return()

    if not caption_ext:
        msgbox("Please provide an extension for the caption files.")
        return empty_return()

    if quick_tags_text:
        if not boolbox(
            f"Are you sure you wish to overwrite the current quick tags?",
            choices=("Yes", "No"),
        ):
            return empty_return()

    images_list = os.listdir(images_dir)
    image_files = [f for f in images_list if f.lower().endswith(IMAGE_EXTENSIONS)]

    # Use a set for lookup but store order with list
    tags = []
    tags_set = set()
    for image_file in image_files:
        caption_file_path = _get_caption_path(image_file, images_dir, caption_ext)
        if os.path.exists(caption_file_path):
            with open(caption_file_path, "r", encoding="utf-8") as f:
                caption = f.read()
                for tag in caption.split(","):
                    tag = tag.strip()
                    tag_key = tag.lower()
                    if tag_key not in tags_set:
                        # Ignore extra spaces
                        total_words = len(re.findall(r"\s+", tag)) + 1
                        if total_words <= ignore_load_tags_word_count:
                            tags.append(tag)
                            tags_set.add(tag_key)

    return ", ".join(tags)
Scans images directory for all available captions and loads all tags under a specified word count into the quick tags box
import_tags_from_captions
python
bmaltais/kohya_ss
kohya_gui/manual_caption_gui.py
https://github.com/bmaltais/kohya_ss/blob/master/kohya_gui/manual_caption_gui.py
Apache-2.0
def load_images(images_dir, caption_ext, loaded_images_dir, page, max_page):
    """
    Triggered to load a new set of images from the folder to caption.
    This loads in the total expected image counts to be used by pagination
    before running update_images
    """

    def empty_return():
        return [loaded_images_dir, page, max_page]

    # Check for images_dir
    if not images_dir:
        msgbox("Image folder is missing...")
        return empty_return()

    if not os.path.exists(images_dir):
        msgbox("Image folder does not exist...")
        return empty_return()

    if not caption_ext:
        msgbox("Please provide an extension for the caption files.")
        return empty_return()

    # Load Images
    images_list = os.listdir(images_dir)
    total_images = len(
        [True for f in images_list if f.lower().endswith(IMAGE_EXTENSIONS)]
    )

    return [images_dir, 1, ceil(total_images / IMAGES_TO_SHOW)]
Triggered to load a new set of images from the folder to caption. This loads the total expected image count to be used by pagination before running update_images.
load_images
python
bmaltais/kohya_ss
kohya_gui/manual_caption_gui.py
https://github.com/bmaltais/kohya_ss/blob/master/kohya_gui/manual_caption_gui.py
Apache-2.0
def update_images(
    images_dir,
    caption_ext,
    quick_tags_text,
    page,
):
    """
    Updates the displayed images and captions from the current page and
    image directory
    """
    # Load Images
    images_list = os.listdir(images_dir)
    image_files = [f for f in images_list if f.lower().endswith(IMAGE_EXTENSIONS)]

    # Quick tags
    quick_tags, quick_tags_set = _get_quick_tags(quick_tags_text or "")

    # Display Images
    rows = []
    image_paths = []
    captions = []
    tag_checkbox_groups = []

    start_index = (int(page) - 1) * IMAGES_TO_SHOW
    for i in range(IMAGES_TO_SHOW):
        image_index = start_index + i
        show_row = image_index < len(image_files)

        image_path = None
        caption = ""
        tag_checkboxes = None
        if show_row:
            image_file = image_files[image_index]
            image_path = os.path.join(images_dir, image_file)

            caption_file_path = _get_caption_path(image_file, images_dir, caption_ext)
            if os.path.exists(caption_file_path):
                with open(caption_file_path, "r", encoding="utf-8") as f:
                    caption = f.read()

            tag_checkboxes = _get_tag_checkbox_updates(caption, quick_tags, quick_tags_set)

        rows.append(gr.Row(visible=show_row))
        image_paths.append(image_path)
        captions.append(caption)
        tag_checkbox_groups.append(tag_checkboxes)

    return (
        rows
        + image_paths
        + image_paths
        + captions
        + tag_checkbox_groups
        + [gr.Row(visible=True), gr.Row(visible=True)]
    )
Updates the displayed images and captions from the current page and image directory
update_images
python
bmaltais/kohya_ss
kohya_gui/manual_caption_gui.py
https://github.com/bmaltais/kohya_ss/blob/master/kohya_gui/manual_caption_gui.py
Apache-2.0
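The manual_caption_gui.py functions above reference IMAGE_EXTENSIONS and IMAGES_TO_SHOW constants that are defined elsewhere in the module. A sketch of the assumed shape of those constants follows; the values are illustrative, not taken from the repository.

# Assumed module-level constants: a tuple usable with str.endswith(), and a page size.
IMAGE_EXTENSIONS = (".png", ".jpg", ".jpeg", ".webp", ".bmp", ".gif")
IMAGES_TO_SHOW = 20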
def check_python_version():
    """
    Check if the current Python version is within the acceptable range.

    Returns:
        bool: True if the current Python version is valid, False otherwise.
    """
    log.debug("Checking Python version...")
    try:
        current_version = sys.version_info
        log.info(f"Python version is {sys.version}")

        if not (MIN_PYTHON_VERSION <= current_version < MAX_PYTHON_VERSION):
            log.error(
                f"The current version of python ({sys.version}) is not supported."
            )
            log.error("The Python version must be >= 3.10.9 and < 3.13.0.")
            return False
        return True
    except Exception as e:
        log.error(f"Failed to verify Python version. Error: {e}")
        return False
Check if the current Python version is within the acceptable range. Returns: bool: True if the current Python version is valid, False otherwise.
check_python_version
python
bmaltais/kohya_ss
setup/setup_common.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/setup_common.py
Apache-2.0
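check_python_version relies on MIN_PYTHON_VERSION and MAX_PYTHON_VERSION constants defined elsewhere in setup_common.py. Judging from the error message, they would be tuples comparable against sys.version_info; the values below are assumptions inferred from that message, not copied from the source.

# Assumed module-level constants (values inferred from the error message above).
MIN_PYTHON_VERSION = (3, 10, 9)
MAX_PYTHON_VERSION = (3, 13, 0)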
def update_submodule(quiet=True):
    """
    Ensure the submodule is initialized and updated.
    """
    log.debug("Updating submodule...")
    git_command = ["git", "submodule", "update", "--init", "--recursive"]

    if quiet:
        git_command.append("--quiet")

    try:
        subprocess.run(git_command, check=True)
        log.info("Submodule initialized and updated.")
    except subprocess.CalledProcessError as e:
        log.error(f"Error during Git operation: {e}")
    except FileNotFoundError as e:
        log.error(e)
Ensure the submodule is initialized and updated.
update_submodule
python
bmaltais/kohya_ss
setup/setup_common.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/setup_common.py
Apache-2.0
def clone_or_checkout(repo_url, branch_or_tag, directory_name):
    """
    Clone a repo or checkout a specific branch or tag if the repo already exists.
    """
    log.debug(
        f"Cloning or checking out repository: {repo_url}, branch/tag: {branch_or_tag}, directory: {directory_name}"
    )
    original_dir = os.getcwd()
    try:
        if not os.path.exists(directory_name):
            run_cmd = [
                "git",
                "clone",
                "--branch",
                branch_or_tag,
                "--single-branch",
                "--quiet",
                repo_url,
                directory_name,
            ]
            log.debug(f"Cloning repository: {run_cmd}")
            subprocess.run(run_cmd, check=True)
            log.info(f"Successfully cloned {repo_url} ({branch_or_tag})")
        else:
            os.chdir(directory_name)
            log.debug("Fetching all branches and tags...")
            subprocess.run(["git", "fetch", "--all", "--quiet"], check=True)
            subprocess.run(
                ["git", "config", "advice.detachedHead", "false"], check=True
            )

            current_branch_hash = (
                subprocess.check_output(["git", "rev-parse", "HEAD"]).strip().decode()
            )
            target_branch_hash = (
                subprocess.check_output(["git", "rev-parse", branch_or_tag])
                .strip()
                .decode()
            )

            if current_branch_hash != target_branch_hash:
                log.debug(f"Checking out branch/tag: {branch_or_tag}")
                subprocess.run(
                    ["git", "checkout", branch_or_tag, "--quiet"], check=True
                )
                log.info(f"Checked out {branch_or_tag} successfully.")
            else:
                log.info(f"Already at required branch/tag: {branch_or_tag}")
    except subprocess.CalledProcessError as e:
        log.error(f"Error during Git operation: {e}")
    finally:
        os.chdir(original_dir)
Clone a repo or checkout a specific branch or tag if the repo already exists.
clone_or_checkout
python
bmaltais/kohya_ss
setup/setup_common.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/setup_common.py
Apache-2.0
def setup_logging():
    """
    Set up logging to file and console.
    """
    log.debug("Setting up logging...")
    from rich.theme import Theme
    from rich.logging import RichHandler
    from rich.console import Console

    console = Console(
        log_time=True,
        log_time_format="%H:%M:%S-%f",
        theme=Theme({"traceback.border": "black", "inspect.value.border": "black"}),
    )

    current_datetime_str = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    log_file = os.path.join(
        os.path.dirname(__file__), f"{LOG_DIR}kohya_ss_gui_{current_datetime_str}.log"
    )
    os.makedirs(os.path.dirname(log_file), exist_ok=True)

    logging.basicConfig(
        level=logging.ERROR,
        format="%(asctime)s | %(name)s | %(levelname)s | %(module)s | %(message)s",
        filename=log_file,
        filemode="a",
        encoding="utf-8",
        force=True,
    )

    log_level = os.getenv("LOG_LEVEL", LOG_LEVEL).upper()
    log.setLevel(getattr(logging, log_level, logging.DEBUG))
    rich_handler = RichHandler(console=console)

    # Replace existing handlers with the rich handler
    log.handlers.clear()
    log.addHandler(rich_handler)

    log.debug("Logging setup complete.")
Set up logging to file and console.
setup_logging
python
bmaltais/kohya_ss
setup/setup_common.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/setup_common.py
Apache-2.0
def check_repo_version():
    """
    This function checks the version of the repository by reading the contents
    of a file named '.release' in the current directory.

    If the file exists, it reads the release version from the file and logs it.
    If the file does not exist, it logs a debug message indicating that the
    release could not be read.
    """
    log.debug("Checking repository version...")
    if os.path.exists(".release"):
        try:
            with open(os.path.join("./.release"), "r", encoding="utf8") as file:
                release = file.read()
                log.info(f"Kohya_ss GUI version: {release}")
        except Exception as e:
            log.error(f"Could not read release: {e}")
    else:
        log.debug("Could not read release...")
This function checks the version of the repository by reading the contents of a file named '.release' in the current directory. If the file exists, it reads the release version from the file and logs it. If the file does not exist, it logs a debug message indicating that the release could not be read.
check_repo_version
python
bmaltais/kohya_ss
setup/setup_common.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/setup_common.py
Apache-2.0
def pip(arg: str, ignore: bool = False, quiet: bool = False, show_stdout: bool = False):
    """
    Executes a pip command with the specified arguments.

    This function is designed to run pip commands and handle their output.
    It can be used to install, upgrade, or uninstall packages using pip.
    If an error occurs during the pip operation and the 'ignore' flag is not set,
    it logs the error message and the pip output for debugging purposes.

    Parameters:
    - arg: A string containing the pip command arguments.
    - ignore: A boolean flag indicating whether to ignore errors during the pip operation.
      If set to True, errors will not be logged.
    - quiet: A boolean flag indicating whether to suppress the output of the pip command.
      If set to True, the function will not log any output.
    - show_stdout: A boolean flag indicating whether to display the pip command's output
      to the console. If set to True, the function will print the output to the console.

    Returns:
    - The output of the pip command as a string, or None if the 'show_stdout' flag is set.
    """
    log.debug(f"Running pip command: {arg}")
    if not quiet:
        log.info(
            f'Installing package: {arg.replace("install", "").replace("--upgrade", "").replace("--no-deps", "").replace("--force", "").replace("  ", " ").strip()}'
        )
    # pip_cmd = [rf"{sys.executable}", "-m", "pip"] + arg.split(" ")
    pip_cmd = [rf"{sys.executable}", "-m", "pip"] + arg.split(" ")
    if installed("uv"):
        log.info("Using uv for pip...")
        pip_cmd.insert(2, "uv")
    log.debug(f"Running pip: {pip_cmd}")
    if show_stdout or installed("uv"):
        subprocess.run(pip_cmd, shell=False, check=False, env=os.environ)
    else:
        result = subprocess.run(
            pip_cmd,
            shell=False,
            check=False,
            env=os.environ,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        txt = result.stdout.decode(encoding="utf8", errors="ignore")
        if len(result.stderr) > 0:
            txt += ("\n" if len(txt) > 0 else "") + result.stderr.decode(
                encoding="utf8", errors="ignore"
            )
        txt = txt.strip()
        if result.returncode != 0 and not ignore:
            log.error(f"Error running pip: {arg}")
            log.error(f"Pip output: {txt}")
        return txt
Executes a pip command with the specified arguments. This function is designed to run pip commands and handle their output. It can be used to install, upgrade, or uninstall packages using pip. If an error occurs during the pip operation and the 'ignore' flag is not set, it logs the error message and the pip output for debugging purposes. Parameters: - arg: A string containing the pip command arguments. - ignore: A boolean flag indicating whether to ignore errors during the pip operation. If set to True, errors will not be logged. - quiet: A boolean flag indicating whether to suppress the output of the pip command. If set to True, the function will not log any output. - show_stdout: A boolean flag indicating whether to display the pip command's output to the console. If set to True, the function will print the output to the console. Returns: - The output of the pip command as a string, or None if the 'show_stdout' flag is set.
pip
python
bmaltais/kohya_ss
setup/setup_common.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/setup_common.py
Apache-2.0
def installed(package, friendly: str = None):
    """
    Checks if the specified package(s) are installed with the correct version.
    This function can handle package specifications with or without version constraints,
    and can also filter out command-line options and URLs when a 'friendly' string is provided.

    Parameters:
    - package: A string that specifies one or more packages with optional version constraints.
    - friendly: An optional string used to provide a cleaner version of the package string
      that excludes command-line options and URLs.

    Returns:
    - True if all specified packages are installed with the correct versions, False otherwise.

    Note:
    This function was adapted from code written by vladimandic.
    """
    log.debug(f"Checking if package is installed: {package}")
    # Remove any optional features specified in brackets (e.g., "package[option]==version" becomes "package==version")
    package = re.sub(r"\[.*?\]", "", package)

    try:
        if friendly:
            # If a 'friendly' version of the package string is provided, split it into components
            pkgs = friendly.split()

            # Filter out command-line options and URLs from the package specification
            pkgs = [
                p for p in package.split() if not p.startswith("--") and "://" not in p
            ]
        else:
            # Split the package string into components, excluding '-' and '=' prefixed items
            pkgs = [
                p
                for p in package.split()
                if not p.startswith("-") and not p.startswith("=")
            ]
            # For each package component, extract the package name, excluding any URLs
            pkgs = [p.split("/")[-1] for p in pkgs]

        for pkg in pkgs:
            # Parse the package name and version based on the version specifier used
            if ">=" in pkg:
                pkg_name, pkg_version = [x.strip() for x in pkg.split(">=")]
            elif "==" in pkg:
                pkg_name, pkg_version = [x.strip() for x in pkg.split("==")]
            else:
                pkg_name, pkg_version = pkg.strip(), None

            # Attempt to find the installed package by its name
            spec = pkg_resources.working_set.by_key.get(pkg_name, None)
            if spec is None:
                # Try again with lowercase name
                spec = pkg_resources.working_set.by_key.get(pkg_name.lower(), None)
            if spec is None:
                # Try replacing underscores with dashes
                spec = pkg_resources.working_set.by_key.get(
                    pkg_name.replace("_", "-"), None
                )

            if spec is not None:
                # Package is found, check version
                version = pkg_resources.get_distribution(pkg_name).version
                log.debug(f"Package version found: {pkg_name} {version}")

                if pkg_version is not None:
                    # Verify if the installed version meets the specified constraints
                    if ">=" in pkg:
                        ok = version >= pkg_version
                    else:
                        ok = version == pkg_version

                    if not ok:
                        # Version mismatch, log warning and return False
                        log.warning(
                            f"Package wrong version: {pkg_name} {version} required {pkg_version}"
                        )
                        return False
            else:
                # Package not found, log debug message and return False
                log.debug(f"Package version not found: {pkg_name}")
                return False

        # All specified packages are installed with the correct versions
        return True
    except ModuleNotFoundError:
        # One or more packages are not installed, log debug message and return False
        log.debug(f"Package not installed: {pkgs}")
        return False
Checks if the specified package(s) are installed with the correct version. This function can handle package specifications with or without version constraints, and can also filter out command-line options and URLs when a 'friendly' string is provided. Parameters: - package: A string that specifies one or more packages with optional version constraints. - friendly: An optional string used to provide a cleaner version of the package string that excludes command-line options and URLs. Returns: - True if all specified packages are installed with the correct versions, False otherwise. Note: This function was adapted from code written by vladimandic.
installed
python
bmaltais/kohya_ss
setup/setup_common.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/setup_common.py
Apache-2.0
def install(
    package,
    friendly: str = None,
    ignore: bool = False,
    reinstall: bool = False,
    show_stdout: bool = False,
):
    """
    Installs or upgrades a Python package using pip, with options to ignore errors,
    reinstall packages, and display outputs.

    Parameters:
    - package (str): The name of the package to be installed or upgraded. Can include
      version specifiers. Anything after a '#' in the package name will be ignored.
    - friendly (str, optional): A more user-friendly name for the package, used for
      logging or user interface purposes. Defaults to None.
    - ignore (bool, optional): If True, any errors encountered during the installation
      will be ignored. Defaults to False.
    - reinstall (bool, optional): If True, forces the reinstallation of the package
      even if it's already installed. This also disables any quick install checks.
      Defaults to False.
    - show_stdout (bool, optional): If True, displays the standard output from the
      pip command to the console. Useful for debugging. Defaults to False.

    Returns:
    None. The function performs operations that affect the environment but does not
    return any value.

    Note:
    If `reinstall` is True, it disables any mechanism that allows for skipping
    installations when the package is already present, forcing a fresh install.
    """
    log.debug(f"Installing package: {package}")
    # Remove anything after '#' in the package variable
    package = package.split("#")[0].strip()

    if reinstall:
        global quick_allowed  # pylint: disable=global-statement
        quick_allowed = False

    if reinstall or not installed(package, friendly):
        pip(f"install --upgrade {package}", ignore=ignore, show_stdout=show_stdout)
Installs or upgrades a Python package using pip, with options to ignore errors, reinstall packages, and display outputs. Parameters: - package (str): The name of the package to be installed or upgraded. Can include version specifiers. Anything after a '#' in the package name will be ignored. - friendly (str, optional): A more user-friendly name for the package, used for logging or user interface purposes. Defaults to None. - ignore (bool, optional): If True, any errors encountered during the installation will be ignored. Defaults to False. - reinstall (bool, optional): If True, forces the reinstallation of the package even if it's already installed. This also disables any quick install checks. Defaults to False. - show_stdout (bool, optional): If True, displays the standard output from the pip command to the console. Useful for debugging. Defaults to False. Returns: None. The function performs operations that affect the environment but does not return any value. Note: If `reinstall` is True, it disables any mechanism that allows for skipping installations when the package is already present, forcing a fresh install.
install
python
bmaltais/kohya_ss
setup/setup_common.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/setup_common.py
Apache-2.0
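A brief usage sketch for install and installed; the package names and versions below are illustrative only, not requirements from the repository.

# Install only if the requested version is not already present; errors are logged unless ignored.
install("bitsandbytes==0.43.0", show_stdout=True)

# Force a reinstall regardless of what is currently installed.
install("xformers", reinstall=True)

# installed() can also be queried directly with a version constraint.
print(installed("torch>=2.0.0"))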
def install_requirements(
    requirements_file, check_no_verify_flag=False, show_stdout: bool = False
):
    """
    Install or verify modules from a requirements file.

    Parameters:
    - requirements_file (str): Path to the requirements file.
    - check_no_verify_flag (bool): If True, verify modules installation status without installing.
    - show_stdout (bool): If True, show the standard output of the installation process.
    """
    log.debug(f"Installing requirements from file: {requirements_file}")
    action = "Verifying" if check_no_verify_flag else "Installing"
    log.info(f"{action} modules from {requirements_file}...")

    with open(requirements_file, "r", encoding="utf8") as f:
        lines = [
            line.strip()
            for line in f.readlines()
            if line.strip() and not line.startswith("#") and "no_verify" not in line
        ]

    for line in lines:
        if line.startswith("-r"):
            included_file = line[2:].strip()
            log.debug(f"Processing included requirements file: {included_file}")
            install_requirements(
                included_file,
                check_no_verify_flag=check_no_verify_flag,
                show_stdout=show_stdout,
            )
        else:
            process_requirements_line(line, show_stdout=show_stdout)
Install or verify modules from a requirements file. Parameters: - requirements_file (str): Path to the requirements file. - check_no_verify_flag (bool): If True, verify modules installation status without installing. - show_stdout (bool): If True, show the standard output of the installation process.
install_requirements
python
bmaltais/kohya_ss
setup/setup_common.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/setup_common.py
Apache-2.0
def sync_bits_and_bytes_files():
    """
    Check for "different" bitsandbytes Files and copy only if necessary.
    This function is specific for Windows OS.
    """

    # Only execute on Windows
    if os.name != "nt":
        print("This function is only applicable to Windows OS.")
        return

    try:
        # Define source and destination directories
        source_dir = os.path.join(os.getcwd(), "bitsandbytes_windows")
        dest_dir_base = os.path.join(sysconfig.get_paths()["purelib"], "bitsandbytes")

        # Clear file comparison cache
        filecmp.clear_cache()

        # Iterate over each file in source directory
        for file in os.listdir(source_dir):
            source_file_path = os.path.join(source_dir, file)

            # Decide the destination directory based on file name
            if file in ("main.py", "paths.py"):
                dest_dir = os.path.join(dest_dir_base, "cuda_setup")
            else:
                dest_dir = dest_dir_base

            # Copy file from source to destination, maintaining original file's metadata
            print(f'Copy {source_file_path} to {dest_dir}')
            shutil.copy2(source_file_path, dest_dir)

    except FileNotFoundError as fnf_error:
        print(f"File not found error: {fnf_error}")
    except PermissionError as perm_error:
        print(f"Permission error: {perm_error}")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
Check for "different" bitsandbytes Files and copy only if necessary. This function is specific for Windows OS.
sync_bits_and_bytes_files
python
bmaltais/kohya_ss
setup/update_bitsandbytes.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/update_bitsandbytes.py
Apache-2.0
def check_path_with_space():
    """Check if the current working directory contains a space."""
    cwd = os.getcwd()
    log.debug(f"Current working directory: {cwd}")
    if " " in cwd:
        # Log an error if the current working directory contains spaces
        log.error(
            "The path in which this python code is executed contains one or many spaces. This is not supported for running kohya_ss GUI."
        )
        log.error(
            "Please move the repo to a path without spaces, delete the venv folder, and run setup.sh again."
        )
        log.error(f"The current working directory is: {cwd}")
        raise RuntimeError("Invalid path: contains spaces.")
Check if the current working directory contains a space.
check_path_with_space
python
bmaltais/kohya_ss
setup/validate_requirements.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/validate_requirements.py
Apache-2.0
def detect_toolkit():
    """Detect the available toolkit (NVIDIA, AMD, or Intel) and log the information."""
    log.debug("Detecting available toolkit...")
    # Check for NVIDIA toolkit by looking for nvidia-smi executable
    if shutil.which("nvidia-smi") or os.path.exists(
        os.path.join(
            os.environ.get("SystemRoot", r"C:\Windows"), "System32", "nvidia-smi.exe"
        )
    ):
        log.debug("nVidia toolkit detected")
        return "nVidia"
    # Check for AMD toolkit by looking for rocminfo executable
    elif shutil.which("rocminfo") or os.path.exists("/opt/rocm/bin/rocminfo"):
        log.debug("AMD toolkit detected")
        return "AMD"
    # Check for Intel toolkit by looking for SYCL or OneAPI indicators
    elif (
        shutil.which("sycl-ls")
        or os.environ.get("ONEAPI_ROOT")
        or os.path.exists("/opt/intel/oneapi")
    ):
        log.debug("Intel toolkit detected")
        return "Intel"
    # Default to CPU if no toolkit is detected
    else:
        log.debug("No specific GPU toolkit detected, defaulting to CPU")
        return "CPU"
Detect the available toolkit (NVIDIA, AMD, or Intel) and log the information.
detect_toolkit
python
bmaltais/kohya_ss
setup/validate_requirements.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/validate_requirements.py
Apache-2.0
def check_torch():
    """Check if torch is available and log the relevant information."""
    # Detect the available toolkit (e.g., NVIDIA, AMD, Intel, or CPU)
    toolkit = detect_toolkit()
    log.info(f"{toolkit} toolkit detected")

    try:
        # Import PyTorch
        log.debug("Importing PyTorch...")
        import torch

        ipex = None
        # Attempt to import Intel Extension for PyTorch if Intel toolkit is detected
        if toolkit == "Intel":
            try:
                log.debug("Attempting to import Intel Extension for PyTorch (IPEX)...")
                import intel_extension_for_pytorch as ipex

                log.debug("Intel Extension for PyTorch (IPEX) imported successfully")
            except ImportError:
                log.warning("Intel Extension for PyTorch (IPEX) not found.")

        # Log the PyTorch version
        log.info(f"Torch {torch.__version__}")

        # Check if CUDA (NVIDIA GPU) is available
        if torch.cuda.is_available():
            log.debug("CUDA is available, logging CUDA info...")
            log_cuda_info(torch)
        # Check if XPU (Intel GPU) is available
        elif hasattr(torch, "xpu") and torch.xpu.is_available():
            log.debug("XPU is available, logging XPU info...")
            log_xpu_info(torch, ipex)
        # Check if MPS (Apple Silicon GPU) is available
        elif hasattr(torch, "mps") and torch.mps.is_available():
            log.info("MPS is available, logging MPS info...")
            log_mps_info(torch)
        # Log a warning if no GPU is available
        else:
            log.warning("Torch reports GPU not available")

        # Return the major version of PyTorch
        return int(torch.__version__[0])
    except ImportError as e:
        # Log an error if PyTorch cannot be loaded
        log.error(f"Could not load torch: {e}")
        sys.exit(1)
    except Exception as e:
        # Log an unexpected error
        log.error(f"Unexpected error while checking torch: {e}")
        sys.exit(1)
Check if torch is available and log the relevant information.
check_torch
python
bmaltais/kohya_ss
setup/validate_requirements.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/validate_requirements.py
Apache-2.0
def log_cuda_info(torch):
    """Log information about CUDA-enabled GPUs."""
    # Log the CUDA and cuDNN versions if available
    if torch.version.cuda:
        log.info(
            f'Torch backend: nVidia CUDA {torch.version.cuda} cuDNN {torch.backends.cudnn.version() if torch.backends.cudnn.is_available() else "N/A"}'
        )
    # Log the ROCm HIP version if using AMD GPU
    elif torch.version.hip:
        log.info(f"Torch backend: AMD ROCm HIP {torch.version.hip}")
    else:
        log.warning("Unknown Torch backend")

    # Log information about each detected CUDA-enabled GPU
    for device in range(torch.cuda.device_count()):
        props = torch.cuda.get_device_properties(device)
        log.info(
            f"Torch detected GPU: {props.name} VRAM {round(props.total_memory / 1024 / 1024)}MB Arch {props.major}.{props.minor} Cores {props.multi_processor_count}"
        )
Log information about CUDA-enabled GPUs.
log_cuda_info
python
bmaltais/kohya_ss
setup/validate_requirements.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/validate_requirements.py
Apache-2.0
def log_mps_info(torch):
    """Log information about Apple Silicon (MPS)."""
    max_recommended_mem = round(torch.mps.recommended_max_memory() / 1024**2)
    log.info(
        f"Torch detected Apple MPS: {max_recommended_mem}MB Unified Memory Available"
    )
    log.warning('MPS support is still experimental, proceed with caution.')
Log information about Apple Silicon (MPS).
log_mps_info
python
bmaltais/kohya_ss
setup/validate_requirements.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/validate_requirements.py
Apache-2.0
def log_xpu_info(torch, ipex):
    """Log information about Intel XPU-enabled GPUs."""
    # Log the Intel Extension for PyTorch (IPEX) version if available
    if ipex:
        log.info(f"Torch backend: Intel IPEX {ipex.__version__}")

    # Log information about each detected XPU-enabled GPU
    for device in range(torch.xpu.device_count()):
        props = torch.xpu.get_device_properties(device)
        log.info(
            f"Torch detected GPU: {props.name} VRAM {round(props.total_memory / 1024 / 1024)}MB Compute Units {props.max_compute_units}"
        )
Log information about Intel XPU-enabled GPUs.
log_xpu_info
python
bmaltais/kohya_ss
setup/validate_requirements.py
https://github.com/bmaltais/kohya_ss/blob/master/setup/validate_requirements.py
Apache-2.0
def writable_dir(target_path):
    """
    Check if a path is a valid directory and that it can be written to.
    """
    path = Path(target_path)
    if path.is_dir():
        if os.access(path, os.W_OK):
            return path
        else:
            raise argparse.ArgumentTypeError(f"Directory '{path}' is not writable.")
    else:
        raise argparse.ArgumentTypeError(f"Directory '{path}' does not exist.")
Check if a path is a valid directory and that it can be written to.
writable_dir
python
bmaltais/kohya_ss
tools/caption.py
https://github.com/bmaltais/kohya_ss/blob/master/tools/caption.py
Apache-2.0
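writable_dir is written as an argparse type validator. The sketch below shows how it would typically be wired into a parser; the argument name and description are assumptions for illustration, not taken from caption.py.

import argparse

parser = argparse.ArgumentParser(description="Example wiring for the writable_dir validator.")
# argparse calls writable_dir(value); a failing check surfaces as a clean CLI error.
parser.add_argument("--output-dir", type=writable_dir, required=True)
args = parser.parse_args(["--output-dir", "."])
print(args.output_dir)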
def create_text_file(image_filename, output_directory, text_extension):
    """Create a text file with the same name as the image file."""
    # Extract prompt from filename
    prompt = Path(image_filename).stem

    # Construct path for the output text file
    text_file_path = Path(output_directory) / (prompt + text_extension)

    try:
        # Write prompt to text file
        with open(text_file_path, 'w') as text_file:
            text_file.write(prompt)
        logging.info(f"Text file created: {text_file_path}")
        return 1
    except IOError as e:
        logging.error(f"Failed to write to {text_file_path}: {e}")
        return 0
Create a text file with the same name as the image file.
create_text_file
python
bmaltais/kohya_ss
tools/caption_from_filename.py
https://github.com/bmaltais/kohya_ss/blob/master/tools/caption_from_filename.py
Apache-2.0
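A brief usage sketch for create_text_file; the file names are illustrative only.

# Writes a "cat_on_sofa.txt" file containing "cat_on_sofa" into the current directory
# and returns 1 on success, 0 on an IOError.
created = create_text_file("cat_on_sofa.png", ".", ".txt")
print(f"{created} caption file written")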
def writable_dir(target_path):
    """
    Check if a path is a valid directory and that it can be written to.
    """
    path = Path(target_path)
    if path.is_dir():
        if os.access(path, os.W_OK):
            return path
        else:
            raise argparse.ArgumentTypeError(f"Directory '{path}' is not writable.")
    else:
        raise argparse.ArgumentTypeError(f"Directory '{path}' does not exist.")
Check if a path is a valid directory and that it can be written to.
writable_dir
python
bmaltais/kohya_ss
tools/cleanup_captions.py
https://github.com/bmaltais/kohya_ss/blob/master/tools/cleanup_captions.py
Apache-2.0
def writable_dir(target_path):
    """
    Check if a path is a valid directory and that it can be written to.
    """
    path = Path(target_path)
    if path.is_dir():
        if os.access(path, os.W_OK):
            return path
        else:
            raise argparse.ArgumentTypeError(f"Directory '{path}' is not writable.")
    else:
        raise argparse.ArgumentTypeError(f"Directory '{path}' does not exist.")
Check if a path is a valid directory and that it can be written to.
writable_dir
python
bmaltais/kohya_ss
tools/convert_images_to_hq_jpg.py
https://github.com/bmaltais/kohya_ss/blob/master/tools/convert_images_to_hq_jpg.py
Apache-2.0
def writable_dir(target_path):
    """
    Check if a path is a valid directory and that it can be written to.
    """
    path = Path(target_path)
    if path.is_dir():
        if os.access(path, os.W_OK):
            return path
        else:
            raise argparse.ArgumentTypeError(f"Directory '{path}' is not writable.")
    else:
        raise argparse.ArgumentTypeError(f"Directory '{path}' does not exist.")
Check if a path is a valid directory and that it can be written to.
writable_dir
python
bmaltais/kohya_ss
tools/convert_images_to_webp.py
https://github.com/bmaltais/kohya_ss/blob/master/tools/convert_images_to_webp.py
Apache-2.0
def aspect_ratio(img_path):
    """
    Calculate and return the aspect ratio of an image.

    Parameters:
        img_path: A string representing the path to the input image.

    Returns:
        float: Aspect ratio of the input image, defined as width / height.
               Returns None if the image cannot be read.
    """
    try:
        image = cv2.imread(img_path)
        if image is None:
            raise ValueError("Image not found or could not be read.")

        height, width = image.shape[:2]
        return float(width) / float(height)
    except Exception as e:
        print(f"Error: {e}")
        return None
Calculate and return the aspect ratio of an image. Parameters: img_path: A string representing the path to the input image. Returns: float: Aspect ratio of the input image, defined as width / height. Returns None if the image cannot be read.
aspect_ratio
python
bmaltais/kohya_ss
tools/crop_images_to_n_buckets.py
https://github.com/bmaltais/kohya_ss/blob/master/tools/crop_images_to_n_buckets.py
Apache-2.0
def sort_images_by_aspect_ratio(path):
    """Sort all images in a folder by aspect ratio"""
    images = []
    for filename in os.listdir(path):
        if filename.endswith(".jpg") or filename.endswith(".jpeg") or filename.endswith(".png") or filename.endswith(".webp"):
            print(filename)
            img_path = os.path.join(path, filename)
            images.append((img_path, aspect_ratio(img_path)))
    # sort the list of tuples based on the aspect ratio
    sorted_images = sorted(images, key=lambda x: x[1])
    return sorted_images
Sort all images in a folder by aspect ratio
sort_images_by_aspect_ratio
python
bmaltais/kohya_ss
tools/crop_images_to_n_buckets.py
https://github.com/bmaltais/kohya_ss/blob/master/tools/crop_images_to_n_buckets.py
Apache-2.0
def create_groups(sorted_images, n_groups):
    """
    Create groups of images from a sorted list of images.

    This function takes a sorted list of images and a number of groups as input,
    and splits the images into that many groups.

    Parameters:
        sorted_images (list of tuples): A list of tuples, where each tuple contains
            the path to an image and its aspect ratio.
        n_groups (int): The number of groups to split the images into.

    Returns:
        list of lists: A list of groups, where each group is a list of tuples
            representing the images in the group.

    Raises:
        ValueError: If n_groups is not a positive integer or if it is greater
            than the number of images.
    """
    if not isinstance(n_groups, int) or n_groups <= 0:
        raise ValueError("Error: n_groups must be a positive integer.")
    if n_groups > len(sorted_images):
        raise ValueError("Error: n_groups must be less than or equal to the number of images.")

    n = len(sorted_images)
    size = n // n_groups
    groups = [sorted_images[i * size : (i + 1) * size] for i in range(n_groups - 1)]
    groups.append(sorted_images[(n_groups - 1) * size:])
    return groups
Create groups of images from a sorted list of images. This function takes a sorted list of images and a number of groups as input, and splits the images into that many groups. Parameters: sorted_images (list of tuples): A list of tuples, where each tuple contains the path to an image and its aspect ratio. n_groups (int): The number of groups to split the images into. Returns: list of lists: A list of groups, where each group is a list of tuples representing the images in the group. Raises: ValueError: If n_groups is not a positive integer or if it is greater than the number of images.
create_groups
python
bmaltais/kohya_ss
tools/crop_images_to_n_buckets.py
https://github.com/bmaltais/kohya_ss/blob/master/tools/crop_images_to_n_buckets.py
Apache-2.0
def average_aspect_ratio(group):
    """
    Calculate the average aspect ratio for a given group of images.

    Parameters:
        group (list of tuples): A list of tuples, where each tuple contains the path
            to an image and its aspect ratio.

    Returns:
        float: The average aspect ratio of the images in the group.
    """
    if not group:
        print("Error: The group is empty")
        return None
    try:
        aspect_ratios = [aspect_ratio for _, aspect_ratio in group]
        avg_aspect_ratio = sum(aspect_ratios) / len(aspect_ratios)
        print(f"Average aspect ratio for group: {avg_aspect_ratio}")
        return avg_aspect_ratio
    except TypeError:
        print("Error: Check the structure of the input group elements. They should be tuples of (image_path, aspect_ratio).")
        return None
    except Exception as e:
        print(f"Error: {e}")
        return None
Calculate the average aspect ratio for a given group of images. Parameters: group (list of tuples): A list of tuples, where each tuple contains the path to an image and its aspect ratio. Returns: float: The average aspect ratio of the images in the group.
average_aspect_ratio
python
bmaltais/kohya_ss
tools/crop_images_to_n_buckets.py
https://github.com/bmaltais/kohya_ss/blob/master/tools/crop_images_to_n_buckets.py
Apache-2.0
def center_crop_image(image, target_aspect_ratio):
    """Crop the input image to the target aspect ratio.

    The function calculates the crop region for the input image based on its current
    aspect ratio and the target aspect ratio.

    Args:
        image: A numpy array representing the input image.
        target_aspect_ratio: A float representing the target aspect ratio.

    Returns:
        A numpy array representing the cropped image.

    Raises:
        ValueError: If the input image is not a valid numpy array with at least two
            dimensions or if the calculated new width or height is zero.
    """
    # Check if the input image is a valid numpy array with at least two dimensions
    if not isinstance(image, np.ndarray) or image.ndim < 2:
        raise ValueError("Input image must be a valid numpy array with at least two dimensions.")

    height, width = image.shape[:2]
    current_aspect_ratio = float(width) / float(height)

    # If the current aspect ratio is already equal to the target aspect ratio, return the image as is
    if current_aspect_ratio == target_aspect_ratio:
        return image

    # Calculate the new width and height based on the target aspect ratio
    if current_aspect_ratio > target_aspect_ratio:
        new_width = int(target_aspect_ratio * height)
        if new_width == 0:
            raise ValueError("Calculated new width is zero. Please check the input image and target aspect ratio.")
        x_start = (width - new_width) // 2
        cropped_image = image[:, x_start:x_start + new_width]
    else:
        new_height = int(width / target_aspect_ratio)
        if new_height == 0:
            raise ValueError("Calculated new height is zero. Please check the input image and target aspect ratio.")
        y_start = (height - new_height) // 2
        cropped_image = image[y_start:y_start + new_height, :]

    return cropped_image
Crop the input image to the target aspect ratio. The function calculates the crop region for the input image based on its current aspect ratio and the target aspect ratio. Args: image: A numpy array representing the input image. target_aspect_ratio: A float representing the target aspect ratio. Returns: A numpy array representing the cropped image. Raises: ValueError: If the input image is not a valid numpy array with at least two dimensions or if the calculated new width or height is zero.
center_crop_image
python
bmaltais/kohya_ss
tools/crop_images_to_n_buckets.py
https://github.com/bmaltais/kohya_ss/blob/master/tools/crop_images_to_n_buckets.py
Apache-2.0
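A quick illustration of center_crop_image on a synthetic array; the shapes are chosen for the example only.

import numpy as np

# A 300x600 (HxW) dummy image cropped to a square: width shrinks from 600 to 300.
dummy = np.zeros((300, 600, 3), dtype=np.uint8)
square = center_crop_image(dummy, 1.0)
print(square.shape)  # (300, 300, 3)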
def copy_related_files(img_path, save_path):
    """
    Copy all files in the same directory as the input image that have the same base name
    as the input image to the output directory with the corresponding new filename.

    Args:
        img_path (str): Path to the input image file.
        save_path: Path to the output directory where the files should be copied with a new name.
    """
    # Get the base filename and directory
    img_dir, img_basename = os.path.split(img_path)
    img_base, img_ext = os.path.splitext(img_basename)
    save_dir, save_basename = os.path.split(save_path)
    save_base, save_ext = os.path.splitext(save_basename)

    # Create the output directory if it does not exist
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # Loop over all files in the same directory as the input image
    try:
        for filename in os.listdir(img_dir):
            # Skip files with the same name as the input image
            if filename == img_basename:
                continue

            # Check if the file has the same base name as the input image
            file_base, file_ext = os.path.splitext(filename)
            if file_base == img_base:
                # Build the new filename and copy the file
                new_filename = os.path.join(save_dir, f"{save_base}{file_ext}")
                shutil.copy2(os.path.join(img_dir, filename), new_filename)
    except OSError as e:
        print(f"Error: {e}")  # Handle errors from os.listdir()
Copy all files in the same directory as the input image that have the same base name as the input image to the output directory with the corresponding new filename. Args: img_path (str): Path to the input image file. save_path: Path to the output directory where the files should be copied with a new name.
copy_related_files
python
bmaltais/kohya_ss
tools/crop_images_to_n_buckets.py
https://github.com/bmaltais/kohya_ss/blob/master/tools/crop_images_to_n_buckets.py
Apache-2.0
def save_resized_cropped_images(group, folder_name, group_number, avg_aspect_ratio, use_original_name=False):
    """Crop and resize all images in the input group to the smallest resolution, and save them to a folder.

    Args:
        group: A list of tuples, where each tuple contains the path to an image and its aspect ratio.
        folder_name: A string representing the name of the folder to save the images to.
        group_number: An integer representing the group number.
        avg_aspect_ratio: A float representing the average aspect ratio of the images in the group.
        use_original_name: A boolean indicating whether to save the images with their original file names.
    """
    if not os.path.exists(folder_name):
        os.makedirs(folder_name)

    # get the smallest size of the images
    smallest_res = float("inf")
    for img_path, _ in group:
        image = cv2.imread(img_path)
        cropped_image = center_crop_image(image, avg_aspect_ratio)
        height, width = cropped_image.shape[:2]
        image_res = height * width
        if image_res < smallest_res:
            smallest_res = image_res
            small_height, small_width = height, width

    # resize all images to the smallest resolution of the images in the group
    for i, (img_path, aspect_ratio) in enumerate(group):
        image = cv2.imread(img_path)
        cropped_image = center_crop_image(image, avg_aspect_ratio)
        # resized_image = cv2.resize(cropped_image, (small_width, small_height))
        if use_original_name:
            save_name = os.path.basename(img_path)
        else:
            save_name = f"group_{group_number}_{i}.jpg"
        save_path = os.path.join(folder_name, save_name)
        cv2.imwrite(save_path, cropped_image)

        # Copy matching files named the same as img_path to
        copy_related_files(img_path, save_path)

        print(f"Saved {save_name} to {folder_name}")
Crop and resize all images in the input group to the smallest resolution, and save them to a folder. Args: group: A list of tuples, where each tuple contains the path to an image and its aspect ratio. folder_name: A string representing the name of the folder to save the images to. group_number: An integer representing the group number. avg_aspect_ratio: A float representing the average aspect ratio of the images in the group. use_original_name: A boolean indicating whether to save the images with their original file names.
save_resized_cropped_images
python
bmaltais/kohya_ss
tools/crop_images_to_n_buckets.py
https://github.com/bmaltais/kohya_ss/blob/master/tools/crop_images_to_n_buckets.py
Apache-2.0
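The crop_images_to_n_buckets.py helpers above form one pipeline. A hedged sketch of how they might be chained follows; the directory paths and bucket count are placeholders, not values from the script.

# Hypothetical end-to-end flow: sort by aspect ratio, split into buckets,
# then center-crop each bucket to its average aspect ratio and save it.
sorted_images = sort_images_by_aspect_ratio("./images")
groups = create_groups(sorted_images, 4)
for group_number, group in enumerate(groups):
    avg = average_aspect_ratio(group)
    save_resized_cropped_images(group, "./buckets", group_number, avg, use_original_name=True)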
def main():
    """Main method for building model from command line."""
    empty_args = core.convert_build_args_to_argparser()  # Create new ArgumentParser
    parsed_args = empty_args.parse_args()  # Parse through command line
    # Post processing of arguments
    parsed_args = core._parse_args(parsed_args)  # pylint: disable=protected-access
    core.build_model_from_args(parsed_args)
Main method for building model from command line.
main
python
llSourcell/Doctor-Dignity
mlc_llm/build.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/build.py
Apache-2.0
def convert_build_args_to_argparser() -> argparse.ArgumentParser:
    """Convert from BuildArgs to an equivalent ArgumentParser."""
    args = argparse.ArgumentParser()
    for field in fields(BuildArgs):
        name = field.name.replace("_", "-")
        field_name = f"--{name}"
        # `kwargs` contains `help`, `choices`, and `action`
        kwargs = field.metadata.copy()
        if field.type == bool:
            # boolean arguments do not need to specify `type`
            args.add_argument(field_name, default=field.default, **kwargs)
        else:
            args.add_argument(field_name, type=field.type, default=field.default, **kwargs)
    return args
Convert from BuildArgs to an equivalent ArgumentParser.
convert_build_args_to_argparser
python
llSourcell/Doctor-Dignity
mlc_llm/core.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/core.py
Apache-2.0
def mod_transform_before_build(
    mod: tvm.IRModule,
    param_manager: param_manager.ParamManager,
    args: argparse.Namespace,
    config: Dict,
) -> tvm.IRModule:
    """First-stage: Legalize ops and trace"""
    if args.model.startswith("minigpt"):
        model_names = ["embed"]
    else:
        model_names = [
            "prefill",
            "decode",
            "create_kv_cache",
            "softmax_with_temperature",
            "get_metadata",
        ]
        if args.sep_embed:
            model_names = ["embed", "prefill_with_embed"] + model_names[1:]
        if args.model.lower().startswith("rwkv-"):
            model_names += ["reset_kv_cache"]

    mod = param_manager.transform_dequantize(mod)

    use_ft_quant = args.quantization.name in ["q4f16_ft", "q8f16_ft"]
    mod = mlc_llm.transform.FuseDecodeTranspose(skip_gemm=not use_ft_quant)(
        mod
    )  # pylint: disable=not-callable

    if hasattr(config, "num_attention_heads") and hasattr(config, "hidden_size"):
        max_seq_len = None
        if args.max_seq_len > 0:
            max_seq_len = args.max_seq_len
        elif hasattr(config, "max_sequence_length"):
            max_seq_len = config.max_sequence_length

        if max_seq_len:
            mod = fuse_split_rotary_embedding(
                mod, config.num_attention_heads, config.hidden_size, max_seq_len
            )

    if args.target_kind == "cuda":
        patterns = []

        has_cutlass = tvm.get_global_func("relax.ext.cutlass", True)

        if has_cutlass and not args.no_cutlass_attn:
            mod["prefill"] = rewrite_attention(mod["prefill"])
            mod["decode"] = rewrite_attention(mod["decode"])
            patterns += get_patterns_with_prefix("cutlass.attention")

        if has_cutlass and not args.no_cutlass_norm:
            patterns += get_patterns_with_prefix("cutlass.layer_norm")
            patterns += get_patterns_with_prefix("cutlass.rms_norm")

        if has_cutlass and use_ft_quant:
            patterns += get_patterns_with_prefix("cutlass.decode_matmul")

        has_cublas = tvm.get_global_func("relax.ext.cublas", True)

        if has_cublas and args.quantization.name in ("q0f16", "q0f32") and not args.no_cublas:
            patterns += get_patterns_with_prefix("cublas")

        if len(patterns) > 0:
            os.makedirs("./tmp", exist_ok=True)

            major, minor = parse_compute_version(tvm.cuda(0).compute_version)

            if major == 8:
                sm = 80
            else:
                sm = 10 * major + minor

            mod = tvm.transform.Sequential(
                [
                    relax.transform.FuseOpsByPattern(
                        patterns, bind_constants=False, annotate_codegen=True
                    ),
                    annotate_workspace,
                    relax.transform.AllocateWorkspace(),
                    relax.transform.RunCodegen(
                        {"cutlass": {"sm": sm, "find_first_valid": False}},
                        entry_functions=model_names,
                    ),
                ]
            )(mod)

    mod = mlc_llm.transform.FuseTransposeMatmul()(mod)  # pylint: disable=not-callable
    mod = relax.pipeline.get_pipeline()(mod)  # pylint: disable=no-value-for-parameter
    mod = mlc_llm.transform.FuseDecodeMatmulEwise(  # pylint: disable=not-callable
        args.quantization.name, args.target_kind
    )(mod)
    mod = mlc_llm.transform.FuseDecodeTake()(mod)
    mod = relax.transform.DeadCodeElimination(model_names)(mod)
    mod = mlc_llm.transform.CleanUpTIRAttrs()(mod)
    mod_deploy = mod

    utils.debug_dump_script(mod_deploy, "mod_deploy.py", args)

    return mod_deploy
First-stage: Legalize ops and trace
mod_transform_before_build
python
llSourcell/Doctor-Dignity
mlc_llm/core.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/core.py
Apache-2.0
def build_model(args: BuildArgs) -> (Optional[str], Optional[str], Optional[str]):
    r"""Builds/compiles a model.

    Parameters
    ----------
    args : :class:`BuildArgs`
        A dataclass of arguments for building models.

    Returns
    ----------
    lib_path: Optional[str]
        The path to the model library file. Return ``None`` if not applicable.
    model_path: Optional[str]
        The path to the folder of the model's parameters. Return ``None`` if not applicable.
    chat_config_path: Optional[str]
        The path to the chat config `.json` file. Return ``None`` if not applicable.
    """
    # Convert BuildArgs to argparse.Namespace so that we can share the rest
    # of the code with the command line workflow
    build_args_as_dict = asdict(args)
    build_args_namespace = argparse.Namespace(**build_args_as_dict)
    args = _parse_args(build_args_namespace)
    build_model_from_args(args)

    # Prepare output; some workflows may or may not have the paths to return
    lib_path = args.lib_path if hasattr(args, "lib_path") else None
    model_path = args.params_path if hasattr(args, "params_path") else None
    chat_config_path = args.chat_config_path if hasattr(args, "chat_config_path") else None

    return lib_path, model_path, chat_config_path
Builds/compiles a model. Parameters ---------- args : :class:`BuildArgs` A dataclass of arguments for building models. Returns ---------- lib_path: Optional[str] The path to the model library file. Return ``None`` if not applicable. model_path: Optional[str] The path to the folder of the model's parameters. Return ``None`` if not applicable. chat_config_path: Optional[str] The path to the chat config `.json` file. Return ``None`` if not applicable.
build_model
python
llSourcell/Doctor-Dignity
mlc_llm/core.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/core.py
Apache-2.0
def debug_dump_benchmark_script(
    mod: tvm.ir.IRModule,
    name: str,
    args: argparse.Namespace,
) -> None:
    """Extract model level benchmark workloads from relax model."""
    if not args.debug_dump:
        return

    from tvm.dlight.benchmark import (  # pylint: disable=import-error,import-outside-toplevel
        extract_all_func_info_from_relax,
    )

    dump_path = os.path.join(args.artifact_path, "debug", name + ".py")
    with open(dump_path, "w", encoding="utf-8") as outfile:
        outfile.write(
            "# Please save this file to dlight_bench/models and add\n"
            + f"# `from .{name} import *` to dlight_bench/models/__init__.py\n"
            + "from dlight_bench import DlightBench\n"
            + "from tvm.script import tir as T\n\n"
        )

        stmt = []
        try:
            relax_funcs, _ = extract_all_func_info_from_relax(mod)
        except NotImplementedError:
            return
        tvm_script_prefix = "# from tvm.script import tir as T"
        for relax_func_gv in relax_funcs:  # pylint: disable=consider-using-dict-items
            for prim_func_gv in relax_funcs[relax_func_gv]:
                # add global_symbol
                func_body = (
                    mod[prim_func_gv]
                    .with_attr("global_symbol", prim_func_gv.name_hint)
                    .script(name=prim_func_gv.name_hint)
                )
                # remove prefix
                if func_body.startswith(tvm_script_prefix + "\n"):
                    func_body = func_body[len(tvm_script_prefix) :]
                # print out
                outfile.write(func_body + "\n")
                # register
                stmt.append(
                    f"DlightBench.register_bench_workload({prim_func_gv.name_hint}, "
                    f"'{name}', '{prim_func_gv.name_hint}')"
                )
        outfile.write("\n" + "\n".join(stmt) + "\n")

    print(f"Dump benchmarking script to {dump_path}.")
Extract model level benchmark workloads from relax model.
debug_dump_benchmark_script
python
llSourcell/Doctor-Dignity
mlc_llm/utils.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/utils.py
Apache-2.0
def tvm_callback_cuda_compile(code, target):  # pylint: disable=unused-argument
    """use nvcc to generate fatbin code for better optimization"""
    arch = []
    for compute_version in compute_versions:
        arch += ["-gencode", f"arch=compute_{compute_version},code=sm_{compute_version}"]
    ptx = nvcc.compile_cuda(code, target_format="fatbin", arch=arch)
    return ptx
use nvcc to generate fatbin code for better optimization
tvm_callback_cuda_compile
python
llSourcell/Doctor-Dignity
mlc_llm/utils.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/utils.py
Apache-2.0
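tvm_callback_cuda_compile depends on a compute_versions list defined elsewhere in utils.py and is meant to override TVM's default CUDA compilation step. The sketch below shows one way such a callback could be registered; the compute_versions value is an assumption, not taken from the repository.

import tvm
from tvm.contrib import nvcc

# Assumed module-level list of target SM versions (e.g. Ampere and Ada).
compute_versions = [80, 89]

# Registering under this callback name makes TVM invoke it when compiling generated CUDA source.
tvm.register_func("tvm_callback_cuda_compile", tvm_callback_cuda_compile, override=True)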
def get_loaded_tensor_info(
    self, pname: str, param_info: relax.TensorStructInfo
) -> Tuple[List[str], List[relax.TensorStructInfo]]:
    """Returns the names and shapes and dtypes of the tensors that need to be loaded
    from the disk.

    It is useful when the parameter is pre-quantized. In such cases, we need to know
    how many tensors the parameter is quantized into, and together with the dtype and
    shape of each tensor, so that we can load the pre-quantized tensors in.
    """
    return [pname], [param_info]
Returns the names and shapes and dtypes of the tensors that need to be loaded from the disk. It is useful when the parameter is pre-quantized. In such cases, we need to know how many tensors the parameter is quantized into, and together with the dtype and shape of each tensor, so that we can load the pre-quantized tensors in.
get_loaded_tensor_info
python
llSourcell/Doctor-Dignity
mlc_llm/quantization/quantization.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/quantization/quantization.py
Apache-2.0
def get_dequantize_func(
    self,
    param_info: relax.TensorStructInfo,
    qparam_info: List[relax.TensorStructInfo],
) -> Optional[FQuantize]:
    """Returns the function which computes dequantization.
    Returning `None` means the parameter does not need dequantization.

    The returned function takes a Relax BlockBuilder and a (list of) quantized weight
    relax Var, computes the dequantization and returns the result Relax Var(s).

    You can use `convert_TE_func` to convert a TE function to the function
    of the desired return format. See `group_quantization.py` for examples.
    """
    return NotImplementedError()
Returns the function which computes dequantization. Returning `None` means the parameter does not need dequantization. The returned function takes a Relax BlockBuilder and a (list of) quantized weight relax Var, computes the dequantization and returns the result Relax Var(s). You can use `convert_TE_func` to convert a TE function to the function of the desired return format. See `group_quantization.py` for examples.
get_dequantize_func
python
llSourcell/Doctor-Dignity
mlc_llm/quantization/quantization.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/quantization/quantization.py
Apache-2.0
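As the docstring says, returning `None` is the signal that a parameter needs no dequantization. A minimal sketch of that case (the class name is hypothetical):

class NoQuantizationSpec:  # hypothetical QuantizationSpec subclass
    def get_dequantize_func(self, param_info, qparam_info):
        # The weight is stored as-is, so no dequantization computation is emitted
        # and the stored tensor is passed through unchanged.
        return None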
def get_param_quant_kind( name: str, param_info: relax.TensorStructInfo ) -> ParamQuantKind: """No quantization for MiniGPT. Use q0f16 or q0f32 when building it.""" return ParamQuantKind.others
No quantization for MiniGPT. Use q0f16 or q0f32 when building it.
get_param_quant_kind
python
llSourcell/Doctor-Dignity
mlc_llm/relax_model/minigpt.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/relax_model/minigpt.py
Apache-2.0
def register_params( self, model: nn.Module, func_name: str, quantization_scheme: quantization.QuantizationScheme, f_get_param_quant_kind: Callable[ [str, relax.TensorStructInfo], quantization.ParamQuantKind ], ) -> None: """Register the parameters of the input model (within the context of the input function) in the parameter manager. Parameters ---------- model : nn.Module The input model whose parameters are registered. func_name : str The name of the function the input model is in. For example, the "prefill" function or the "decode" function. quantization_scheme : quantization.QuantizationScheme The quantization scheme of the input model, which describes how to quantize the model. f_get_param_quant_kind: Callable[[str, relax.TensorStructInfo], quantization.ParamQuantKind] A function which takes the name and StructInfo (effectively shape and dtype) of a parameter, and returns which quantization kind this parameter uses. This is used for applying quantization to the parameters. """ if quantization_scheme.qspec_updater_class is not None: self.qspec_updater_classes.append(quantization_scheme.qspec_updater_class) if quantization_scheme.f_convert_param_bkwd is not None: self.f_convert_param_bkwd = quantization_scheme.f_convert_param_bkwd if quantization_scheme.f_compute_relax_param is not None: self.f_compute_relax_param = quantization_scheme.f_compute_relax_param if quantization_scheme.f_run_prequantize is not None: self.f_run_prequantize = quantization_scheme.f_run_prequantize self.params_in_func[func_name] = [] # For each parameter in the input model, get its quantization kind and # register the parameter with its name and quantization kind. for name, relax_param in named_parameters(model).items(): quant_kind = f_get_param_quant_kind(name, relax_param.struct_info) param = self._register_param( name, relax_param, getattr(quantization_scheme, quant_kind.name), func_name, ) self.params_in_func[func_name].append(param)
Register the parameters of the input model (within the context of the input function) in the parameter manager. Parameters ---------- model : nn.Module The input model whose parameters are registered. func_name : str The name of the function the input model is in. For example, the "prefill" function or the "decode" function. quantization_scheme : quantization.QuantizationScheme The quantization scheme of the input model, which describes how to quantize the model. f_get_param_quant_kind: Callable[[str, relax.TensorStructInfo], quantization.ParamQuantKind] A function which takes the name and StructInfo (effectively shape and dtype) of a parameter, and returns which quantization kind this parameter uses. This is used for applying quantization to the parameters.
register_params
python
llSourcell/Doctor-Dignity
mlc_llm/relax_model/param_manager.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/relax_model/param_manager.py
Apache-2.0
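A sketch of how a model build flow might call this method; the `ParamManager()` constructor, the model class, and the scheme object are assumptions here, and only the `register_params` call itself mirrors the signature above.

# Hypothetical wiring inside a model construction step.
param_manager = ParamManager()
model = LlamaForCausalLM(config)  # assumed: an nn.Module built with the Relax nn API
param_manager.register_params(
    model,
    func_name="prefill",
    quantization_scheme=quant_scheme,             # assumed: a QuantizationScheme instance
    f_get_param_quant_kind=get_param_quant_kind,  # model-specific rule, as in the MiniGPT example above
)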
def set_param_loading_func( self, model_path: str, use_safetensors: bool, f_convert_pname_fwd: Callable[[str], List[str]] = lambda pname: [pname], f_convert_param_bkwd: Callable[ [str, Any], Optional[List[Tuple[str, Any]]] ] = lambda pname, torch_param: [(pname, torch_param)], f_compute_relax_param: Callable[[str, List[Any]], Any] = f_default_compute_relax_param, *, no_lazy_param_loading: bool = False, ) -> None: """Set the parameter loading functions. Parameters ---------- model_path : str The path of the Hugging Face model on disk. use_safetensors : bool Whether to use ``.safetensors`` instead of ``.bin`` to load model. f_convert_pname_fwd : Callable[[str], List[str]] The function which converts Relax parameter name (ours) to torch's parameter names. See the document of ParamManager for more details. f_convert_param_bkwd : Callable[[str, Any], Optional[List[Tuple[str, Any]]]] The function which converts torch parameter and param name back to Relax parameters with names. `Any` here stands for numpy.ndarray. See the document of ParamManager for more details. f_compute_relax_param : Callable[[str, List[Any]], Any] The function which computes a Relax parameter from a list of torch parameters. `Any` here stands for numpy.ndarray. See the document of ParamManager for more details. no_lazy_param_loading : bool A boolean indicating that no lazy parameter loading from torch is needed. This needs to be set as True when all the model weights are loaded at the time of constructing the model. """ self.f_convert_pname_fwd = f_convert_pname_fwd if self.f_convert_param_bkwd is None: self.f_convert_param_bkwd = f_convert_param_bkwd if self.f_compute_relax_param is None: self.f_compute_relax_param = f_compute_relax_param self.model_path = model_path self.use_safetensors = use_safetensors if self.use_safetensors: # Use a pointer here to prevent repeated import in tvm registered function from safetensors.torch import load_file # pylint: disable=import-outside-toplevel self.safetensors_load_func = load_file pnames_to_load = [] for param_name in self.param_names: param = self.params[param_name] loaded_names, _ = param.quant_spec.get_loaded_tensor_info(param_name, param.param_info) pnames_to_load += loaded_names self.nparam_to_load = len(pnames_to_load) if not no_lazy_param_loading: self.pidx2pname = {pidx: pname for pidx, pname in enumerate(pnames_to_load)} else: self.pidx2pname = dict()
Set the parameter loading functions. Parameters ---------- model_path : str The path of the Hugging Face model on disk. use_safetensors : bool Whether to use ``.safetensors`` instead of ``.bin`` to load model. f_convert_pname_fwd : Callable[[str], List[str]] The function which converts Relax parameter name (ours) to torch's parameter names. See the document of ParamManager for more details. f_convert_param_bkwd : Callable[[str, Any], Optional[List[Tuple[str, Any]]]] The function which converts torch parameter and param name back to Relax parameters with names. `Any` here stands for numpy.ndarray. See the document of ParamManager for more details. f_compute_relax_param : Callable[[str, List[Any]], Any] The function which computes a Relax parameter from a list of torch parameters. `Any` here stands for numpy.ndarray. See the document of ParamManager for more details. no_lazy_param_loading : bool A boolean indicating that no lazy parameter loading from torch is needed. This needs to be set as True when all the model weights are loaded at the time of constructing the model.
set_param_loading_func
python
llSourcell/Doctor-Dignity
mlc_llm/relax_model/param_manager.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/relax_model/param_manager.py
Apache-2.0
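A rough usage sketch; the checkpoint path is hypothetical, and the conversion lambdas simply assume the Relax and torch parameter names already agree.

param_manager.set_param_loading_func(
    model_path="dist/models/Llama-2-7b-chat-hf",  # hypothetical Hugging Face checkpoint directory
    use_safetensors=False,
    f_convert_pname_fwd=lambda pname: [pname],
    f_convert_param_bkwd=lambda pname, torch_param: [(pname, torch_param)],
)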
def transform_dequantize(self, mod: tvm.IRModule) -> tvm.IRModule: """Apply dequantization to the input IRModule. Parameters ---------- mod : tvm.IRModule The input IRModule to be applied dequantization. The IRModule contains all the constructed Relax functions (e.g., the "prefill"/"decode" functions) and is expected to have all of its parameters registered in the ParamManager. Returns ------- updated_mod : tvm.IRModule The IRModule updated with the dequantization computation. """ # For each Relax function in the input IRModule (e.g., "prefill"), # we create its input relax.Var of all the quantized data, and # store the mapping from function name to the var. func2param_var: Dict[str, relax.Var] = {} quantized_param_info = self.get_quantized_param_info() for gv, func in mod.functions.items(): if not isinstance(func, relax.Function): continue if func.attrs is None or not "num_input" in func.attrs: continue func2param_var[gv.name_hint] = relax.Var("params", quantized_param_info) # Cache mapping to avoid duplicate dequantization. dequantized_cache: Dict[relax.Var, relax.Var] = {} # Define a var replacement function for applying dequantization. def f_replace(var: relax.Var, bb: relax.BlockBuilder) -> relax.Var: if var in dequantized_cache: return dequantized_cache[var] assert var in self.func_raw_param_map func_name, param = self.func_raw_param_map[var] dequantized = self._dequantize(param, func2param_var[func_name], bb) dequantized_cache[var] = dequantized return dequantized # Create the function mutator for applying dequantization. replacer = ParamReplacer(mod, func2param_var, f_replace) # Update the input IRModule with dequantization. mod = replacer.transform() return mod
Apply dequantization to the input IRModule. Parameters ---------- mod : tvm.IRModule The input IRModule to be applied dequantization. The IRModule contains all the constructed Relax functions (e.g., the "prefill"/"decode" functions) and is expected to have all of its parameters registered in the ParamManager. Returns ------- updated_mod : tvm.IRModule The IRModule updated with the dequantization computation.
transform_dequantize
python
llSourcell/Doctor-Dignity
mlc_llm/relax_model/param_manager.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/relax_model/param_manager.py
Apache-2.0
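Once every function has been registered, the call site is a one-liner; a sketch:

# `mod` holds the traced "prefill"/"decode" Relax functions that still use raw weight vars.
mod = param_manager.transform_dequantize(mod)
# Each raw weight use is now a TupleGetItem on the packed quantized-parameter tuple,
# followed by whatever dequantization computation the parameter's spec emits.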
def get_param_loading_functions( self, model_params: List[Optional[tvm.nd.NDArray]], loaded_params: List[tvm.nd.NDArray], loaded_idx_set: Set[int], loaded_torch_bins: Set[str], cached_relax_params: Dict[int, tvm.nd.NDArray], cached_torch_params: Dict[str, Any], device: Device, device_cpu: Device, ) -> Tuple[Callable, Callable]: """A wrapper function which returns the `get_item` and `set_item` functions for parameter lazy loading. Parameters ---------- model_params : List[Optional[tvm.nd.NDArray]] The pre-loaded model parameters, for which we skip lazy loading. loaded_params : List[tvm.nd.NDArray] The parameter loading result, storing all the loaded parameters. loaded_idx_set : Set[int] The set of indices of loaded parameters, serving for robustness guarantee to avoid one parameter being loaded for multiple times. loaded_torch_bins : Set[str] The set of torch binary filenames, serving for robustness guarantee to avoid one torch binary file being loaded for multiple times. cached_relax_params : Dict[int, tvm.nd.NDArray] The set of cached Relax parameters. cached_torch_params: Dict[str, Any] The set of cached torch parameters. `Any` here stands for numpy.ndarray. device : Device The device which we load the parameters to. device_cpu : Device The CPU device. """ import torch # pylint: disable=import-outside-toplevel assert self.f_convert_pname_fwd is not None assert self.f_convert_param_bkwd is not None assert self.f_compute_relax_param is not None pname2pidx: Dict[str, int] = {pname: pidx for pidx, pname in self.pidx2pname.items()} def fetch_torch_param(torch_param): if str(torch_param.dtype) == "torch.bfloat16": # Convert to float32 first. return torch_param.detach().cpu().float().numpy() else: return torch_param.detach().cpu().numpy() def load_torch_params_from_bin(torch_binname: str): torch_binpath = os.path.join(self.model_path, torch_binname) torch_params = None if self.use_safetensors: torch_params = self.safetensors_load_func(torch_binpath) else: torch_params = torch.load( torch_binpath, map_location=torch.device("cpu"), ) torch_param_names = list(torch_params.keys()) for torch_param_name in torch_param_names: torch_param = fetch_torch_param(torch_params[torch_param_name]) del torch_params[torch_param_name] relax_params = self.f_convert_param_bkwd(torch_param_name, torch_param) if relax_params is not None: for param_name, param in relax_params: if param_name not in pname2pidx.keys(): continue pidx = pname2pidx[param_name] assert pidx not in cached_relax_params cached_relax_params[pidx] = tvm.nd.array(param, device_cpu) else: assert torch_param_name not in cached_torch_params cached_torch_params[torch_param_name] = torch_param del torch_param def get_item(i): # If the weight is already provided by `model_params`, directly use it # and no need to load from binary file. if model_params[i] is not None: assert i not in cached_relax_params return tvm.nd.array(model_params[i], device=device) # Otherwise, we load the weight from its corresponding binary file. 
            assert i in self.pidx2pname
            relax_pname = self.pidx2pname[i]
            torch_pnames = self.f_convert_pname_fwd(relax_pname)

            if i not in cached_relax_params:
                for torch_binname in [
                    self.torch_pname2binname[torch_pname] for torch_pname in torch_pnames
                ]:
                    if torch_binname in loaded_torch_bins:
                        continue
                    load_torch_params_from_bin(torch_binname)
                    loaded_torch_bins.add(torch_binname)

            if i not in cached_relax_params:
                assert len(torch_pnames) > 1
                assert all(torch_pname in cached_torch_params for torch_pname in torch_pnames)
                cached_relax_params[i] = self.f_compute_relax_param(
                    relax_pname,
                    [cached_torch_params[torch_pname] for torch_pname in torch_pnames],
                )
                for torch_pname in torch_pnames:
                    del cached_torch_params[torch_pname]

            assert i in cached_relax_params
            assert i not in loaded_idx_set
            param_on_device = tvm.nd.array(cached_relax_params[i], device=device)
            loaded_idx_set.add(i)
            del cached_relax_params[i]
            return param_on_device

        def set_item(i, computed_param):
            if len(loaded_params) <= i:
                loaded_params.extend([None for _ in range(i - len(loaded_params) + 1)])
            loaded_params[i] = tvm.nd.array(computed_param, device=device_cpu)

        return get_item, set_item
A wrapper function which returns the `get_item` and `set_item` functions for parameter lazy loading. Parameters ---------- model_params : List[Optional[tvm.nd.NDArray]] The pre-loaded model parameters, for which we skip lazy loading. loaded_params : List[tvm.nd.NDArray] The parameter loading result, storing all the loaded parameters. loaded_idx_set : Set[int] The set of indices of loaded parameters, serving for robustness guarantee to avoid one parameter being loaded for multiple times. loaded_torch_bins : Set[str] The set of torch binary filenames, serving for robustness guarantee to avoid one torch binary file being loaded for multiple times. cached_relax_params : Dict[int, tvm.nd.NDArray] The set of cached Relax parameters. cached_torch_params: Dict[str, Any] The set of cached torch parameters. `Any` here stands for numpy.ndarray. device : Device The device which we load the parameters to. device_cpu : Device The CPU device.
get_param_loading_functions
python
llSourcell/Doctor-Dignity
mlc_llm/relax_model/param_manager.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/relax_model/param_manager.py
Apache-2.0
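The two closures are intended to be exposed to the weight-loading pipeline as packed functions. A sketch of that wiring follows; the registration names `get_item`/`set_item` and the state containers are assumptions about how a build script would use this, not taken from the source.

import tvm

model_params = [None] * param_manager.nparam_to_load
loaded_params, loaded_idx_set, loaded_torch_bins = [], set(), set()
cached_relax_params, cached_torch_params = {}, {}

get_item, set_item = param_manager.get_param_loading_functions(
    model_params, loaded_params, loaded_idx_set, loaded_torch_bins,
    cached_relax_params, cached_torch_params,
    device=tvm.cuda(0), device_cpu=tvm.cpu(0),
)

# Assumed names: the lazy-loading pipeline looks these up as global packed functions.
tvm.register_func("get_item", get_item, override=True)
tvm.register_func("set_item", set_item, override=True)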
def _register_param( self, name: str, var: relax.Var, quant_spec: quantization.QuantizationSpec, func_name: str, ) -> Parameter: """Register a single parameter in the parameter manager. In most cases, this method is not directly used outside this class: it is called by `register_params` above. Parameters ---------- name : str The name of the parameter to register. Name serves as the unique identifier of the parameter. var : relax.Var The parameter relax.Var on the nn.Module side. quant_spec : quantization.QuantizationSpec The quantization specification of the parameter func_name : str The name of the function the input var is in. For example, the "prefill" function or the "decode" function. Returns ------- param : Parameter The registered Parameter. """ assert ( var not in self.func_raw_param_map ), "The input var is not supposed to be already registered." assert isinstance( var.struct_info.shape, relax.ShapeExpr ), "The parameter to register is expected to have static shape" assert all( [isinstance(dim_len, tir.IntImm) for dim_len in var.struct_info.shape.values] ), "The parameter to register is expected to have static shape" if name in self.params: # When the input name appears in `self.params`, it means the input # parameter has been previously registered in some other function. # Thus, we check if the dtype, shape and the quantization specification # of both sides are consistent. param = self.params[name] assert ( param.quant_spec == quant_spec ), "One parameter is expected to be quantized by single specification in all functions." assert ( param.param_info.dtype == var.struct_info.dtype ), "Dtype mismatch of one parameter in two functions." assert ( param.param_info.ndim == var.struct_info.ndim ), "Shape mismatch of one parameter in two functions." for len0, len1 in zip(param.param_info.shape.values, var.struct_info.shape.values): assert len0.value == len1.value, "Shape mismatch of one parameter in two functions." else: # Otherwise, the parameter is registered for the first time. param = Parameter(name, var.struct_info, quant_spec) self.params[name] = param self.param_names.append(name) # Record the mapping from the input relax.Var to the function name and # the parameter in the manager. self.func_raw_param_map[var] = (func_name, param) return param
Register a single parameter in the parameter manager. In most cases, this method is not directly used outside this class: it is called by `register_params` above. Parameters ---------- name : str The name of the parameter to register. Name serves as the unique identifier of the parameter. var : relax.Var The parameter relax.Var on the nn.Module side. quant_spec : quantization.QuantizationSpec The quantization specification of the parameter func_name : str The name of the function the input var is in. For example, the "prefill" function or the "decode" function. Returns ------- param : Parameter The registered Parameter.
_register_param
python
llSourcell/Doctor-Dignity
mlc_llm/relax_model/param_manager.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/relax_model/param_manager.py
Apache-2.0
def _dequantize( self, param: Parameter, quantized_tuple: relax.Var, bb: relax.BlockBuilder, qparams: List[relax.Var] = None, ) -> relax.Var: """Applying dequantization to the input parameter. This method is called by `transform_module` below, and is not directly invoked outside the class. Parameters ---------- param : Parameter The parameter whose quantized tensors are to be dequantized. quantized_tuple : relax.Var The relax.Var of the quantized tensors of all parameters in the model. bb : relax.BlockBuilder The Relax BlockBuilder used for inserting the dequantization computations. qparams : List[relax.Var] The quantized parts of the parameter. By default it is `None`, in which case we will get the quantized parts from `quantized_tuple`. Returns ------- The dequantized parameter, in the form of a relax.Var. """ if not qparams: # Get the corresponding Relax vars of the quantized tensors of this parameter. qparams: List[relax.Var] = [] for qparam_idx in self.param2qrange[param]: qparams.append(bb.emit(relax.TupleGetItem(quantized_tuple, qparam_idx))) # Get the dequantization function of this parameter. f_dequantize = param.quant_spec.get_dequantize_func( param_info=param.param_info, qparam_info=[qparam.struct_info for qparam in qparams], ) if f_dequantize is None: # If the parameter does not have a dequantization function, its "quantized # data" is expected to have only one element. assert len(qparams) == 1, ( "A parameter without dequantization function is expected not to have " 'more than one "quantized data".' ) return qparams[0] else: # Apply the dequantization function. return bb.emit(f_dequantize(bb, qparams))
Applying dequantization to the input parameter. This method is called by `transform_module` below, and is not directly invoked outside the class. Parameters ---------- param : Parameter The parameter whose quantized tensors are to be dequantized. quantized_tuple : relax.Var The relax.Var of the quantized tensors of all parameters in the model. bb : relax.BlockBuilder The Relax BlockBuilder used for inserting the dequantization computations. qparams : List[relax.Var] The quantized parts of the parameter. By default it is `None`, in which case we will get the quantized parts from `quantized_tuple`. Returns ------- The dequantized parameter, in the form of a relax.Var.
_dequantize
python
llSourcell/Doctor-Dignity
mlc_llm/relax_model/param_manager.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/relax_model/param_manager.py
Apache-2.0
def load_torch_pname2binname_map(
    model_path: str,
    use_safetensors: bool,
    relax_pnames: Set[str],
    f_convert_pname_fwd: Callable[[str], List[str]] = lambda pname: [pname],
) -> Dict[str, str]:
    """Constructing the dictionary from each torch parameter's name to
    the name of the binary shard where the torch parameter is saved.

    Parameters
    ----------
    model_path : str
        The path of the Hugging Face model on disk.

    use_safetensors: bool
        Whether to use ``.safetensors`` instead of ``.bin`` to load model.

    relax_pnames: Set[str]
        The name of the Relax parameters.

    f_convert_pname_fwd: Callable[[str], List[str]]
        The function which converts Relax parameter name to torch's
        parameter names. See ParamManager for more details.
    """
    bin_idx_path = None
    single_shard_file_name = None
    if use_safetensors:
        bin_idx_path = os.path.join(model_path, "model.safetensors.index.json")
        single_shard_file_name = "model.safetensors"
    else:
        bin_idx_path = os.path.join(model_path, "pytorch_model.bin.index.json")
        single_shard_file_name = "pytorch_model.bin"
    single_shard_path = os.path.join(model_path, single_shard_file_name)

    if os.path.isfile(bin_idx_path):
        # Multiple weight shards.
        with open(bin_idx_path, "r") as f_torch_json:
            torch_bin_json = json.load(f_torch_json)
            torch_pname2binname = torch_bin_json["weight_map"]
    elif os.path.isfile(single_shard_path):
        # Single weight shard.
        torch_pname2binname = {
            torch_pname: single_shard_file_name
            for relax_pname in relax_pnames
            for torch_pname in f_convert_pname_fwd(relax_pname)
        }
    else:
        suffix = ".safetensors" if use_safetensors else ".bin"
        shard_names = []
        # Scan every single file with the suffix.
        for filename in os.listdir(model_path):
            if filename.endswith(suffix):
                shard_names.append(filename)
        if len(shard_names) == 1:
            torch_pname2binname = {
                torch_pname: shard_names[0]
                for relax_pname in relax_pnames
                for torch_pname in f_convert_pname_fwd(relax_pname)
            }
        else:
            raise ValueError("Multiple weight shard files without json map is not supported")
    return torch_pname2binname
Constructing the dictionary from each torch parameter's name to the name of the binary shard where the torch parameter is saved. Parameters ---------- model_path : str The path of the Hugging Face model on disk. use_safetensors: bool Whether to use ``.safetensors`` instead of ``.bin`` to load model. relax_pnames: Set[str] The name of the Relax parameters. f_convert_pname_fwd: Callable[[str], List[str]] The function which converts Relax parameter name to torch's parameter names. See ParamManager for more details.
load_torch_pname2binname_map
python
llSourcell/Doctor-Dignity
mlc_llm/relax_model/param_manager.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/relax_model/param_manager.py
Apache-2.0
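A usage sketch, assuming a ParamManager whose forward name mapping has already been set; the checkpoint path and the example output are illustrative.

torch_pname2binname = load_torch_pname2binname_map(
    model_path="dist/models/Llama-2-7b-chat-hf",  # hypothetical checkpoint directory
    use_safetensors=False,
    relax_pnames=set(param_manager.param_names),
    f_convert_pname_fwd=param_manager.f_convert_pname_fwd,
)
# e.g. {"model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin", ...}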
def create_quantize_func(param_manager: ParamManager) -> tvm.IRModule: """Construct the Relax function which computes quantization. This method is called by `transform_module` below, and is not directly invoked outside the class. Parameters ---------- param_manager : ParamManager The parameter manager which has all the parameter information. Returns ------- The created function which computes quantization. Precisely, an IRModule which contains the main quantization Relax function and a series of TIR functions is returned. """ bb = relax.BlockBuilder() param2qrange = dict() # Construct the input of the function. # We need a list of ranges for each # parameter to get its corresponding tensors loaded from disk. input_tensor_info: List[relax.TensorStructInfo] = [] loaded_tensor_ranges: List[range] = [] for name in param_manager.param_names: param = param_manager.params[name] _, loaded_tensor_info = param.quant_spec.get_loaded_tensor_info(name, param.param_info) loaded_tensor_ranges.append( range( len(input_tensor_info), len(input_tensor_info) + len(loaded_tensor_info), ) ) input_tensor_info += loaded_tensor_info raw_param_tuple = relax.Var("params", relax.TupleStructInfo(input_tensor_info)) with bb.function("transform_params", params=[raw_param_tuple]): with bb.dataflow(): quantized_params: List[relax.Var] = [] for pidx, name in enumerate(param_manager.param_names): param = param_manager.params[name] param_vars: List[relax.Var] = [] # Emit relax.TupleGetItem to get the raw parameters or pre-quantized params. for loaded_tensor_idx in loaded_tensor_ranges[pidx]: param_vars.append( bb.emit(relax.TupleGetItem(raw_param_tuple, loaded_tensor_idx)) ) # Get the quantization function of this parameter. f_quantize = param.quant_spec.get_quantize_func(param.param_info) if f_quantize is None: # If the parameter does not have a quantization function, either it # does not need quantization or it is pre-quantized. param2qrange[param] = range( len(quantized_params), len(quantized_params) + len(param_vars), ) quantized_params += param_vars else: # If the parameter has a quantization function, it is not expected # to be pre-quantized. assert len(param_vars) == 1, ( "A parameter with quantization function is not expected " "to be pre-quantized." ) # Apply the quantization function. quantized_data = bb.emit(f_quantize(bb, param_vars)) if isinstance(quantized_data.struct_info, relax.TupleStructInfo): n_tensor = len(quantized_data.struct_info.fields) assert n_tensor > 1 # Record the range of quantized tensors of this parameter. param2qrange[param] = range( len(quantized_params), len(quantized_params) + n_tensor ) # Collect the quantized tensors to return. for i in range(n_tensor): quantized_params.append(bb.emit(relax.TupleGetItem(quantized_data, i))) else: assert isinstance(quantized_data.struct_info, relax.TensorStructInfo) param2qrange[param] = range( len(quantized_params), len(quantized_params) + 1 ) quantized_params.append(quantized_data) output = bb.emit_output(relax.Tuple(quantized_params)) bb.emit_func_output(output) mod = bb.get() param_manager.param2qrange = param2qrange param_manager.quantized_param_info = mod["transform_params"].struct_info.ret # Return the created IRModule. return bb.get()
Construct the Relax function which computes quantization. This method is called by `transform_module` below, and is not directly invoked outside the class. Parameters ---------- param_manager : ParamManager The parameter manager which has all the parameter information. Returns ------- The created function which computes quantization. Precisely, an IRModule which contains the main quantization Relax function and a series of TIR functions is returned.
create_quantize_func
python
llSourcell/Doctor-Dignity
mlc_llm/relax_model/param_manager.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/relax_model/param_manager.py
Apache-2.0
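The call site is again a single line; a sketch of what the result gives you:

transform_mod = create_quantize_func(param_manager)
# transform_mod["transform_params"] maps the raw on-disk tensors to the quantized tensors.
# As side effects, param_manager.param2qrange records which output slots belong to each
# parameter, and param_manager.quantized_param_info records the output struct info.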
def create_kv_cache_func(bb: relax.BlockBuilder, config: RWKVConfig) -> None: """NOTE: It's not typical kv-cache, but try to reuse the logic for the quick hack.""" init_shape = relax.ShapeExpr((1, config.hidden_size)) with bb.function("create_kv_cache", []): with bb.dataflow(): input_dtype_zeros = bb.emit(relax.op.zeros(init_shape, config.dtype)) fp32_zeros = bb.emit(relax.op.zeros(init_shape, "float32")) fp32_neg_inf = bb.emit(fp32_zeros - relax.const(1e30, "float32")) caches = [] f_kv_cache_create = relax.extern("vm.builtin.attention_kv_cache_create") conf = [ ("att_x", input_dtype_zeros), ("att_a", fp32_zeros), ("att_b", fp32_zeros), ("att_p", fp32_neg_inf), ("ffn_x", input_dtype_zeros), ] for i in range(config.num_hidden_layers): for name, init_value in conf: caches.append( bb.emit( relax.Call( f_kv_cache_create, [init_value, init_shape, relax.PrimValue(1)], sinfo_args=[R.Object()], ), name_hint=f"{name}_state_{i}", ) ) gv = bb.emit_output(caches) bb.emit_func_output(gv)
NOTE: It's not typical kv-cache, but try to reuse the logic for the quick hack.
create_kv_cache_func
python
llSourcell/Doctor-Dignity
mlc_llm/relax_model/rwkv.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/relax_model/rwkv.py
Apache-2.0
def remove_global_buf_alloc( func: tir.PrimFunc, ) -> Tuple[tir.PrimFunc, List[relax.TensorStructInfo]]: """Remove the global buffer allocation for a given TIR PrimFunc.""" assert isinstance(func.body, tir.BlockRealize) params = list(func.params) buffer_map = dict(func.buffer_map) tensor_sinfo = [] alloc_buffers = [] insertion_point = len(params) while params[insertion_point - 1].dtype != "handle": insertion_point -= 1 assert insertion_point >= 1 prev_root_block = func.body.block for buf_alloc in func.body.block.alloc_buffers: if buf_alloc.scope() == "global": param = tir.Var("var_" + buf_alloc.name, "handle") params.insert(insertion_point, param) insertion_point += 1 buffer_map[param] = buf_alloc tensor_sinfo.append( relax.TensorStructInfo(buf_alloc.shape, buf_alloc.dtype) ) else: alloc_buffers.append(buf_alloc) if len(tensor_sinfo) == 0: return func, [] assert len(prev_root_block.iter_vars) == 0 assert len(prev_root_block.reads) == 0 assert len(prev_root_block.writes) == 0 assert len(prev_root_block.match_buffers) == 0 assert prev_root_block.name_hint == "root" assert prev_root_block.init is None root_block = tir.Block( iter_vars=[], reads=[], writes=[], name_hint="root", body=prev_root_block.body, alloc_buffers=alloc_buffers, annotations=prev_root_block.annotations, ) updated_func = tir.PrimFunc( params=params, body=tir.BlockRealize(iter_values=[], predicate=True, block=root_block), ret_type=func.ret_type, buffer_map=buffer_map, attrs=func.attrs, ) return updated_func, tensor_sinfo
Remove the global buffer allocation for a given TIR PrimFunc.
remove_global_buf_alloc
python
llSourcell/Doctor-Dignity
mlc_llm/transform/lift_tir_global_buffer_alloc.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/transform/lift_tir_global_buffer_alloc.py
Apache-2.0
def resolve_tir_var_mapping( func: tir.PrimFunc, call: relax.Call, tensor_sinfo: List[relax.TensorStructInfo] ) -> Tuple[List[relax.TensorStructInfo], bool]: """Resolve the TIR symbolic var relationship across sides of PrimFunc and Relax Function""" var_map: Dict[tir.Var, tir.PrimExpr] = dict() n_arg = len(call.args[1].fields) for i in range(n_arg): buffer_shape = func.buffer_map[func.params[i]].shape arg_shape = call.args[1][i].struct_info.shape.values assert len(buffer_shape) == len(arg_shape) for vl, vr in zip(buffer_shape, arg_shape): if isinstance(vl, tir.Var): var_map[vl] = vr elif not isinstance(vl, tir.IntImm): return [], False ret_tensors = call.sinfo_args[0] ret_tensors = ( [ret_tensors] if isinstance(ret_tensors, relax.TensorStructInfo) else list(ret_tensors.fields) ) for i in range(len(ret_tensors)): buffer_shape = func.buffer_map[func.params[n_arg + i]].shape ret_tensor_shape = ret_tensors[i].shape.values assert len(buffer_shape) == len(ret_tensor_shape) for vl, vr in zip(buffer_shape, ret_tensor_shape): if isinstance(vl, tir.Var): var_map[vl] = vr elif not isinstance(vl, tir.IntImm): return [], False updated_tensor_sinfo = [] for sinfo in tensor_sinfo: if not contain_symbolic_var(sinfo): updated_tensor_sinfo.append(sinfo) continue new_shape = [] for v in sinfo.shape.values: new_shape.append(tir.stmt_functor.substitute(v, var_map)) updated_tensor_sinfo.append(relax.TensorStructInfo(new_shape, sinfo.dtype)) return updated_tensor_sinfo, True
Resolve the TIR symbolic var relationship across sides of PrimFunc and Relax Function
resolve_tir_var_mapping
python
llSourcell/Doctor-Dignity
mlc_llm/transform/lift_tir_global_buffer_alloc.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/transform/lift_tir_global_buffer_alloc.py
Apache-2.0
def analyze_func( func: relax.Function, pidx2binname: Dict[int, str], ) -> Tuple[ List[relax.Binding], Dict[relax.Var, List[relax.Binding]], Dict[relax.Binding, int], ]: """Binding grouping analysis function. It takes the function to be analyzed, and mapping from each raw tensor index to the name of the binary file where it resides. This analysis function * computes a new order of weight fetching bindings (the bindings in form `lv = params[idx]`) based on weight location on disk. * collects the dataflow def-use information of the given function for topological sort (particularly, it collects the consumers of each binding variables and the number of variables each binding depends on). Parameters ---------- func : relax.Function The weight transform function to be analyzed. pidx2binname : Dict[int, str] The mapping from each raw tensor index to the name of the binary file where it resides. Returns ------- get_param_bindings : List[relax.Binding] The weight fetching bindings (`lv = params[idx]`) in the new order. var_users : Dict[relax.Var, List[relax.Binding]] The consumer bindings of each binding variable. Used for topological sort. num_depending_vars : Dict[relax.Binding, int] The number of variables each binding depends on. Used for topological sort. """ # The mapping of the weight fetching bindings in each binary file. # Here empty string means the weight is not in any binary file (e.g., cached # sin and cos values for rotary embeddings). binname2get_param_bindings: Dict[str, List[relax.Binding]] = {"": []} # The set of binding variables. binding_var_set: Set[relax.Var] = set() var_users: Dict[relax.Var, List[relax.Binding]] = {} num_depending_vars: Dict[relax.Binding, int] = {} # Sanity check on the function pattern. assert len(func.params) == 1 assert isinstance(func.body, relax.SeqExpr) assert len(func.body.blocks) == 1 assert isinstance(func.body.blocks[0], relax.DataflowBlock) assert func.body.blocks[0].bindings[-1].var.same_as(func.body.body) params = func.params[0] bindings = func.body.blocks[0].bindings # Go through each binding except the last one. (The last one is the output # binding `gv = (lv, lv1, ...)`) which we ignore for analysis. for binding in bindings[:-1]: value = binding.value binding_var_set.add(binding.var) var_users[binding.var] = [] if isinstance(value, relax.TupleGetItem) and value.tuple_value.same_as(params): # For weight fetching bindings (`lv = params[idx]`), we group them # according to the binary file name. pidx = value.index if pidx not in pidx2binname: binname2get_param_bindings[""].append(binding) continue binname = pidx2binname[pidx] if binname in binname2get_param_bindings: binname2get_param_bindings[binname].append(binding) else: binname2get_param_bindings[binname] = [binding] else: # For other bindings, we collect the use-def information for # topological sort. num_depending_vars[binding] = 0 def fvisit(obj): if isinstance(obj, relax.Var) and obj in binding_var_set: assert obj in var_users var_users[obj].append(binding) num_depending_vars[binding] += 1 relax.analysis.post_order_visit(value, fvisit) # Get the weight fetching bindings in new order according to the group results. get_param_bindings: List[relax.Binding] = [] for bindings in binname2get_param_bindings.values(): get_param_bindings += bindings return get_param_bindings, var_users, num_depending_vars
Binding grouping analysis function. It takes the function to be analyzed, and mapping from each raw tensor index to the name of the binary file where it resides. This analysis function * computes a new order of weight fetching bindings (the bindings in form `lv = params[idx]`) based on weight location on disk. * collects the dataflow def-use information of the given function for topological sort (particularly, it collects the consumers of each binding variables and the number of variables each binding depends on). Parameters ---------- func : relax.Function The weight transform function to be analyzed. pidx2binname : Dict[int, str] The mapping from each raw tensor index to the name of the binary file where it resides. Returns ------- get_param_bindings : List[relax.Binding] The weight fetching bindings (`lv = params[idx]`) in the new order. var_users : Dict[relax.Var, List[relax.Binding]] The consumer bindings of each binding variable. Used for topological sort. num_depending_vars : Dict[relax.Binding, int] The number of variables each binding depends on. Used for topological sort.
analyze_func
python
llSourcell/Doctor-Dignity
mlc_llm/transform/reorder_transform_func.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/transform/reorder_transform_func.py
Apache-2.0
def reorder_func( func: relax.Function, pidx2binname: Dict[int, str], ) -> relax.Function: """Reorder the bindings of the input weight transform Relax function according the weight location in binary files. This function first analyzes the input function and gets the reordered weight fetching bindings and the use-def information for topological sort. It then reorders all bindings in the function with topological sort. Parameters ---------- func : relax.Function The weight transform function to be analyzed. pidx2binname : Dict[int, str] The mapping from each raw tensor index to the name of the binary file where it resides. Returns ------- func_updated : relax.Function The returned function where the bindings are updated with the new order. """ get_param_bindings, var_users, num_depending_vars = analyze_func(func, pidx2binname) # The bindings in the new order, output by the topological sort. new_bindings: List[relax.Binding] = [] # The queue used in the topological sort. binding_queue: List[relax.Binding] = [] for binding, n_depending in list(num_depending_vars.items()): if n_depending == 0: binding_queue.append(binding) del num_depending_vars[binding] # Start topological sort: # each time we emit a weight fetching binding, and then adds all bindings # that depend on it. for get_param_binding in get_param_bindings: binding_queue.append(get_param_binding) while len(binding_queue) > 0: binding = binding_queue.pop(0) new_bindings.append(binding) for user_binding in var_users[binding.var]: num_depending_vars[user_binding] -= 1 if num_depending_vars[user_binding] == 0: del num_depending_vars[user_binding] binding_queue.append(user_binding) # Add the output binding. new_bindings.append(func.body.blocks[0].bindings[-1]) # Sanity check on the integrity. assert len(new_bindings) == len(func.body.blocks[0].bindings) assert len(num_depending_vars) == 0 return relax.Function( func.params, relax.SeqExpr(blocks=[relax.DataflowBlock(new_bindings)], body=func.body.body), func.ret_struct_info, func.is_pure, func.attrs, )
Reorder the bindings of the input weight transform Relax function according the weight location in binary files. This function first analyzes the input function and gets the reordered weight fetching bindings and the use-def information for topological sort. It then reorders all bindings in the function with topological sort. Parameters ---------- func : relax.Function The weight transform function to be analyzed. pidx2binname : Dict[int, str] The mapping from each raw tensor index to the name of the binary file where it resides. Returns ------- func_updated : relax.Function The returned function where the bindings are updated with the new order.
reorder_func
python
llSourcell/Doctor-Dignity
mlc_llm/transform/reorder_transform_func.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/mlc_llm/transform/reorder_transform_func.py
Apache-2.0
def get_lib_path(): """Get library path, name and version""" # Directly exec libinfo to get the right setup libinfo_py = os.path.join(CURRENT_DIR, "./mlc_chat/libinfo.py") libinfo = {"__file__": libinfo_py} exec(compile(open(libinfo_py, "rb").read(), libinfo_py, "exec"), libinfo, libinfo) version = libinfo["__version__"] # conda installs libraries into env instead of packaging with pip if not CONDA_BUILD: libs = [ libinfo["find_lib_path"]("mlc_llm")[0], libinfo["find_lib_path"]("mlc_llm_module")[0], ] else: libs = None return libs, version
Get library path, name and version
get_lib_path
python
llSourcell/Doctor-Dignity
python/setup.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/setup.py
Apache-2.0
def get_delta_message(curr_message: str, new_message: str) -> str: r"""Given the current message and the new message, compute the delta message (the newly generated part, the diff of the new message from the current message). Parameters ---------- curr_message : str The message generated in the previous round. new_message : str The message generated in the new round. Returns ------- delta_message : str The diff of the new message from the current message (the newly generated part). """ f_get_delta_message = tvm.get_global_func("mlc.get_delta_message") return f_get_delta_message(curr_message, new_message)
Given the current message and the new message, compute the delta message (the newly generated part, the diff of the new message from the current message). Parameters ---------- curr_message : str The message generated in the previous round. new_message : str The message generated in the new round. Returns ------- delta_message : str The diff of the new message from the current message (the newly generated part).
get_delta_message
python
llSourcell/Doctor-Dignity
python/mlc_chat/base.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/base.py
Apache-2.0
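Conceptually, the delta is the suffix appended after the common prefix of the two messages; the actual work is done by the registered `mlc.get_delta_message` packed function. The pure-Python sketch below only illustrates the idea and is not the library's implementation.

def delta_message_sketch(curr_message: str, new_message: str) -> str:
    # Walk past the longest common prefix, then return what was newly appended.
    i = 0
    while i < len(curr_message) and i < len(new_message) and curr_message[i] == new_message[i]:
        i += 1
    return new_message[i:]

assert delta_message_sketch("The sky is", "The sky is blue.") == " blue."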
def __call__(self, message: str = "", stopped: bool = False): r"""Process newly generated message using callback functions. Parameters ---------- message : str The newly generated message. stopped : bool Whether generation reaches an end. If True, clear the state of current message. """ if stopped: self.stopped_callback() self.curr_message = "" else: delta = get_delta_message(self.curr_message, message) self.curr_message = message self.delta_callback(delta)
Process newly generated message using callback functions. Parameters ---------- message : str The newly generated message. stopped : bool Whether generation reaches an end. If True, clear the state of current message.
__call__
python
llSourcell/Doctor-Dignity
python/mlc_chat/callback.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/callback.py
Apache-2.0
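Subclasses only need to supply `delta_callback` and `stopped_callback`; everything else is handled by `__call__` above. Below is a small illustrative subclass that collects deltas into a list, assuming the base class is the `DeltaCallback` that `StreamToStdout` also derives from.

class CollectToList(DeltaCallback):  # hypothetical subclass, for illustration only
    def __init__(self, callback_interval: int = 2):
        super().__init__()  # assumed to initialize curr_message
        self.callback_interval = callback_interval
        self.pieces = []

    def delta_callback(self, delta_message: str):
        # Receives only the newly generated piece of text.
        self.pieces.append(delta_message)

    def stopped_callback(self):
        # Generation finished; a list collector has nothing to flush.
        pass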
def __init__(self, callback_interval: int = 2): r"""Initialize the callback class with callback interval. Parameters ---------- callback_interval : int The refresh rate of the streaming process. """ super().__init__() self.callback_interval = callback_interval
Initialize the callback class with callback interval. Parameters ---------- callback_interval : int The refresh rate of the streaming process.
__init__
python
llSourcell/Doctor-Dignity
python/mlc_chat/callback.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/callback.py
Apache-2.0
def _get_model_path(model: str) -> (str, str): """Use user-provided argument ``model`` to search for a valid model path. We define "valid" as having an ``mlc-chat-config.json`` right under the folder. Parameters ---------- model : str User's input; may be a compiled model's name, or a full path. Returns ------ model_path : str A "valid" path to model folder, with ``os.isfile(os.path.join(model_path, "mlc-chat-config.json"))`` being ``True``. chat_file : str Essentially ``os.path.join(model_path, "mlc-chat-config.json")``. Raises ------ FileNotFoundError: if we cannot find a valid `model_path`. """ # Note that the order of this list corresponds to our search priority candidate_paths = [ f"{model}", # full path, or just the name f"dist/prebuilt/{model}", # Using prebuilt workflow f"dist/{model}/params", # Default directory after mlc_llm.build_model() f"dist/prebuilt/mlc-chat-{model}", # Also prebuilt workflow, but missed prefix ] # Look for the first folder that has `mlc-chat-config.json` under it for candidate in candidate_paths: chat_file = os.path.join(candidate, "mlc-chat-config.json") if os.path.isfile(chat_file): logging.info(f"Using model folder: {os.path.abspath(candidate)}") logging.info(f"Using mlc chat config: {os.path.abspath(chat_file)}") return candidate, chat_file # Failed to find a valid model_path, analyzing error for user # First see if any candidate path is an actual folder found_folder = False valid_dir_str = "" for candidate in candidate_paths: if os.path.isdir(candidate): valid_dir_str += f"- {os.path.abspath(candidate)}\n" found_folder = True if found_folder: # Error 1: there is a folder, but not an mlc-llm model folder (E1) err_msg = ( "The model folder provided does not seem to refer to a valid mlc-llm model folder.\n" "Specifically, we cannot find `mlc-chat-config.json`, a required file. You should " "provide a path that contains the file.\n" "According to your input `model`, we looked at folder(s):\n" f"{valid_dir_str}" "MLC-Chat consumes models that are processed by the MLC-LLM build process.\n" f"Please checkout {_PYTHON_GET_STARTED_TUTORIAL_URL} for an example on " "how to load a model." ) raise FileNotFoundError(err_msg) else: # Error 2: cannot find a folder (E0) all_paths_str = "" for path in candidate_paths: all_paths_str += f"- {path}\n" err_msg = ( "Cannot find the model folder. We searched over the following possible paths:\n" f"{all_paths_str}" "You can try to pass in `model=/path/to/your-model-path`, and confirm " "that it contains `mlc-chat-config.json`, among other essential files.\n" f"Please checkout {_PYTHON_GET_STARTED_TUTORIAL_URL} for an " "example on how to load a model." ) raise FileNotFoundError(err_msg)
Use user-provided argument ``model`` to search for a valid model path. We define "valid" as having an ``mlc-chat-config.json`` right under the folder. Parameters ---------- model : str User's input; may be a compiled model's name, or a full path. Returns ------ model_path : str A "valid" path to model folder, with ``os.isfile(os.path.join(model_path, "mlc-chat-config.json"))`` being ``True``. chat_file : str Essentially ``os.path.join(model_path, "mlc-chat-config.json")``. Raises ------ FileNotFoundError: if we cannot find a valid `model_path`.
_get_model_path
python
llSourcell/Doctor-Dignity
python/mlc_chat/chat_module.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/chat_module.py
Apache-2.0
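A quick sketch of the lookup, assuming a prebuilt model has been downloaded under `dist/prebuilt`; the resulting paths are examples, not guaranteed values.

model_path, chat_file = _get_model_path("Llama-2-7b-chat-hf-q4f16_1")
# model_path -> e.g. "dist/prebuilt/mlc-chat-Llama-2-7b-chat-hf-q4f16_1"
# chat_file  -> os.path.join(model_path, "mlc-chat-config.json")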
def _get_chat_config(config_file_path: str, user_chat_config: Optional[ChatConfig]) -> ChatConfig:
    """Read in the config file in model path, then potentially override with user input.

    Parameters
    ----------
    config_file_path : str
        ``chat_file`` returned by ``_get_model_path()``.
    user_chat_config : Optional[ChatConfig]
        User's input, a partial ``ChatConfig`` to override the one in ``config_file_path``.

    Returns
    ------
    final_chat_config : ChatConfig
        ``ChatConfig`` corresponding to ``config_file_path``, overridden by ``user_chat_config``.
    """
    final_chat_config = None
    with open(config_file_path, mode="rt", encoding="utf-8") as f:
        json_object = json.load(f)
        final_chat_config = ChatConfig._from_json(json_object)
    if user_chat_config is not None:
        # We override using user's chat config
        for field in fields(user_chat_config):
            field_name = field.name
            field_value = getattr(user_chat_config, field_name)
            if field_value is not None:
                setattr(final_chat_config, field_name, field_value)
    return final_chat_config
Read in the config file in model path, then potentially override with user input.

Parameters
----------
config_file_path : str
    ``chat_file`` returned by ``_get_model_path()``.
user_chat_config : Optional[ChatConfig]
    User's input, a partial ``ChatConfig`` to override the one in ``config_file_path``.

Returns
------
final_chat_config : ChatConfig
    ``ChatConfig`` corresponding to ``config_file_path``, overridden by ``user_chat_config``.
_get_chat_config
python
llSourcell/Doctor-Dignity
python/mlc_chat/chat_module.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/chat_module.py
Apache-2.0
def _get_lib_module(
    model: str,
    model_path: str,
    chat_config: ChatConfig,
    lib_path: Optional[str],
    device_name: str,
    config_file_path: str,
) -> tvm.runtime.Module:
    """Look up the model library. Then return a corresponding ``tvm`` runtime Module.

    Parameters
    ----------
    model : str
        User's input; may be a compiled model's name, or a full path.
    model_path : str
        Model path found by `_get_model_path`.
    chat_config : ChatConfig
        Chat config after potential overrides. Returned by ``_get_chat_config``.
    lib_path : Optional[str]
        User's input. Supposedly a full path to model library. Prioritized to use.
    device_name : str
        User's input. Used to construct the library model file name.
    config_file_path : str
        The path to ``mlc-chat-config.json``. Used for error message making.

    Returns
    ------
    lib_module : tvm.runtime.Module
        A tvm runtime module corresponding to the model library we find.

    Raises
    ------
    FileNotFoundError: if we cannot find a valid model library file.
    """
    # 1. Use user's lib_path if provided
    if lib_path is not None:
        if os.path.isfile(lib_path):
            logging.info(f"Using library model: {lib_path}")
            return tvm.runtime.load_module(lib_path)
        else:
            err_msg = (
                f"The `lib_path` you passed in is not a file: {lib_path}.\nPlease checkout "
                f"{_PYTHON_GET_STARTED_TUTORIAL_URL} for an example on how to load a model."
            )
            raise FileNotFoundError(err_msg)

    # 2. Generate all possible file names according to OS
    candidate_lib_names = []
    if sys.platform.startswith("linux"):
        candidate_lib_names = [f"{chat_config.model_lib}-{device_name}.so"]
    elif sys.platform.startswith("darwin"):
        # Note that `dylib` comes before `so` since we prioritize `dylib` for MacOS
        candidate_lib_names = [
            f"{chat_config.model_lib}-{device_name}.dylib",
            f"{chat_config.model_lib}-{device_name}.so",
        ]
    elif sys.platform.startswith("win32"):
        candidate_lib_names = [f"{chat_config.model_lib}-{device_name}.dll"]
    else:
        candidate_lib_names = [
            f"{chat_config.model_lib}-{device_name}.dylib",
            f"{chat_config.model_lib}-{device_name}.so",
            f"{chat_config.model_lib}-{device_name}.dll",
        ]

    # 3. Generate possible model library paths
    candidate_paths = []
    for lib_name in candidate_lib_names:
        # Equivalent to {model_path}/../
        pardir_model_path = os.path.abspath(os.path.join(os.path.abspath(model_path), os.pardir))
        candidate_paths.extend(
            [
                f"{lib_name}",
                f"dist/prebuilt/lib/{lib_name}",  # Using prebuilt workflow
                f"dist/{model}/{lib_name}",  # Default directory after mlc_llm.build_model()
                os.path.join(model_path, lib_name),  # User put library inside `model_path`
                os.path.join(pardir_model_path, lib_name),  # Under parent directory of `model_path`
            ]
        )

    # 4. Search for model library
    for candidate in candidate_paths:
        if os.path.isfile(candidate):
            logging.info(f"Using library model: {os.path.abspath(candidate)}\n")
            return tvm.runtime.load_module(candidate)

    # 5. Error
    err_msg = (
        f"Cannot find the model library that corresponds to `{chat_config.model_lib}`.\n"
        f"`{chat_config.model_lib}` is either provided in the `chat_config` "
        f"you passed in, or specified in {config_file_path}.\n"
        "We searched over the following possible paths: \n"
    )
    for candidate in candidate_paths:
        err_msg += f"- {candidate}\n"
    err_msg += (
        "If you would like to directly specify the model library path, you may "
        "consider passing in the `lib_path` parameter.\n"
        f"Please checkout {_PYTHON_GET_STARTED_TUTORIAL_URL} for an example "
        "on how to load a model."
    )
    raise FileNotFoundError(err_msg)
Look up the model library. Then return a corresponding ``tvm`` runtime Module. Parameters ---------- model : str User's input; may be a compiled model's name, or a full path. model_path : str Model path found by `_get_model_path`. chat_config : ChatConfig Chat config after potential overrides. Returned by ``_get_chat_config``. lib_path : Optional[str] User's input. Supposedly a full path to model library. Prioritized to use. device_name : str User's input. Used to construct the library model file name. config_file_path : str The path to ``mlc-chat-config.json``. Used for error message making. Returns ------ lib_module : tvm.runtime.Module A tvm runtime module corresponding to the model library we find. Raises ------ FileNotFoundError: if we cannot find a valid model library file.
_get_lib_module
python
llSourcell/Doctor-Dignity
python/mlc_chat/chat_module.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/chat_module.py
Apache-2.0
def _detect_local_device(device_id: int = 0):
    """Automatically detect the local device if user does not specify.

    Parameters
    ----------
    device_id : int
        The local device id.

    Returns
    ------
    dev : Device
        The local device.
    """
    if tvm.metal().exist:
        return tvm.metal(device_id), "metal"
    if tvm.rocm().exist:
        return tvm.rocm(device_id), "rocm"
    if tvm.cuda().exist:
        return tvm.cuda(device_id), "cuda"
    if tvm.vulkan().exist:
        return tvm.vulkan(device_id), "vulkan"
    if tvm.opencl().exist:
        return tvm.opencl(device_id), "opencl"

    logging.info(
        "None of the following devices was detected: metal, rocm, cuda, vulkan, opencl. Switching to llvm instead."
    )
    return tvm.cpu(device_id), "llvm"
Automatically detect the local device if user does not specify. Parameters ---------- device_id : int The local device id. Returns ------ dev : Device The local device.
_detect_local_device
python
llSourcell/Doctor-Dignity
python/mlc_chat/chat_module.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/chat_module.py
Apache-2.0
def generate(self, prompt: str, progress_callback=None) -> str: r"""A high-level method that returns the full response from the chat module given a user prompt. User can optionally specify which callback method to use upon receiving the response. By default, no callback will be applied. Parameters ---------- prompt : str The user input prompt, i.e. a question to ask the chat module. progress_callback: object The optional callback method used upon receiving a newly generated message from the chat module. See `mlc_chat/callback.py` for a full list of available callback classes. Currently, only streaming to stdout callback method is supported, see `Examples` for more detailed usage. Returns ------- output : string The generated full output from the chat module. Examples -------- .. code-block:: python # Suppose we would like to stream the response of the chat module to stdout # with a refresh interval of 2. Upon calling generate(), We will see the response of # the chat module streaming to stdout piece by piece, and in the end we receive the # full response as a single string `output`. from mlc_chat import ChatModule, callback cm = ChatModule(xxx) prompt = "what's the color of banana?" output = cm.generate(prompt, callback.StreamToStdout(callback_interval=2)) print(output) """ self._prefill(prompt) if not progress_callback: while not self._stopped(): self._decode() new_msg = self._get_message() return new_msg # apply callback with a rate of callback_interval i, new_msg = 0, "" while not self._stopped(): self._decode() if i % progress_callback.callback_interval == 0 or self._stopped(): new_msg = self._get_message() progress_callback(new_msg) i += 1 progress_callback(stopped=True) return new_msg
A high-level method that returns the full response from the chat module given a user prompt. User can optionally specify which callback method to use upon receiving the response. By default, no callback will be applied. Parameters ---------- prompt : str The user input prompt, i.e. a question to ask the chat module. progress_callback: object The optional callback method used upon receiving a newly generated message from the chat module. See `mlc_chat/callback.py` for a full list of available callback classes. Currently, only streaming to stdout callback method is supported, see `Examples` for more detailed usage. Returns ------- output : string The generated full output from the chat module. Examples -------- .. code-block:: python # Suppose we would like to stream the response of the chat module to stdout # with a refresh interval of 2. Upon calling generate(), We will see the response of # the chat module streaming to stdout piece by piece, and in the end we receive the # full response as a single string `output`. from mlc_chat import ChatModule, callback cm = ChatModule(xxx) prompt = "what's the color of banana?" output = cm.generate(prompt, callback.StreamToStdout(callback_interval=2)) print(output)
generate
python
llSourcell/Doctor-Dignity
python/mlc_chat/chat_module.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/chat_module.py
Apache-2.0
def reset_chat(self, chat_config: Optional[ChatConfig] = None): r"""Reset the chat session, clear all chat history, and potentially override the original `mlc-chat-config.json`. Parameters ---------- chat_config : Optional[ChatConfig] A ``ChatConfig`` instance partially filled. If specified, the chat module will reload the `mlc-chat-config.json`, and override it with ``chat_config``, just like in initialization. Note ---- The model remains the same after :func:`reset_chat`. To reload module, please either re-initialize a :class:`ChatModule` instance or use :func:`_reload` instead. """ self._reset_chat_func() if chat_config is not None: # Redo the overriding self.chat_config = _get_chat_config(self.config_file_path, chat_config) user_chat_config_json_str = _convert_chat_config_to_json_str( chat_config, self.chat_config.conv_template ) # Second argument is `partial_update = True` self._load_json_override_func(user_chat_config_json_str, True)
Reset the chat session, clear all chat history, and potentially override the original `mlc-chat-config.json`. Parameters ---------- chat_config : Optional[ChatConfig] A ``ChatConfig`` instance partially filled. If specified, the chat module will reload the `mlc-chat-config.json`, and override it with ``chat_config``, just like in initialization. Note ---- The model remains the same after :func:`reset_chat`. To reload module, please either re-initialize a :class:`ChatModule` instance or use :func:`_reload` instead.
reset_chat
python
llSourcell/Doctor-Dignity
python/mlc_chat/chat_module.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/chat_module.py
Apache-2.0
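A hedged sketch of `reset_chat` with a partial config override, assuming `ChatConfig` is importable alongside `ChatModule` and exposes a `temperature` field mirroring `mlc-chat-config.json` (both assumptions; adjust to your build):

from mlc_chat import ChatModule, ChatConfig

cm = ChatModule(model="Llama-2-7b-chat-hf-q4f16_1")  # placeholder model id
cm.generate("Hello!")

# Clear the chat history and apply the partial override; the compiled model
# itself is not reloaded by reset_chat().
cm.reset_chat(ChatConfig(temperature=0.2))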
def benchmark_generate(self, prompt: str, generate_length: int) -> str: r"""Controlled generation with input prompt and fixed number of generated tokens, ignoring system prompt. For example, .. code:: python from mlc_chat import ChatModule cm = ChatModule(model="Llama-2-7b-chat-hf-q4f16_1") output = cm.benchmark_generate("What's the meaning of life?", generate_length=256) print(f"Generated text:\n{output}\n") print(f"Statistics: {cm.stats()}") will generate 256 tokens in total based on prompt "What's the meaning of life?". After generation, you can use `cm.stats()` to print the generation speed. Notes ----- 1. This function is typically used in controlled benchmarks. It generates text without system prompt (i.e., it is pure text generation with no chat style) and ignores the token stop model(s). 2. To make the benchmark as accurate as possible, we first do a round of warmup prefill and decode before text generation. 3. This function resets the previous performance statistics. Parameters ---------- prompt : str The prompt of the text generation. generate_length : int The target length of generation. Returns ------- output : str The generated text output. """ if generate_length < 0: raise ValueError( "The generation length is expected to be non-negative, " f"while the given length is {generate_length}" ) # warmup run self.reset_chat() self._prefill(prompt) self._decode() return self._raw_generate_func(prompt, generate_length)
Controlled generation with input prompt and fixed number of generated tokens, ignoring system prompt. For example, .. code:: python from mlc_chat import ChatModule cm = ChatModule(model="Llama-2-7b-chat-hf-q4f16_1") output = cm.benchmark_generate("What's the meaning of life?", generate_length=256) print(f"Generated text:\n{output}\n") print(f"Statistics: {cm.stats()}") will generate 256 tokens in total based on prompt "What's the meaning of life?". After generation, you can use `cm.stats()` to print the generation speed. Notes ----- 1. This function is typically used in controlled benchmarks. It generates text without system prompt (i.e., it is pure text generation with no chat style) and ignores the token stop model(s). 2. To make the benchmark as accurate as possible, we first do a round of warmup prefill and decode before text generation. 3. This function resets the previous performance statistics. Parameters ---------- prompt : str The prompt of the text generation. generate_length : int The target length of generation. Returns ------- output : str The generated text output.
benchmark_generate
python
llSourcell/Doctor-Dignity
python/mlc_chat/chat_module.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/chat_module.py
Apache-2.0
def _prefill( self, input: str, decode_next_token: bool = True, place_in_prompt: PlaceInPrompt = PlaceInPrompt.All, ): r"""Run prefill stage for a given input and optionally decode the first output token. User can decide where to place the input in the prompt. Parameters ---------- input : str The user input string. decode_next_token : bool Whether to decode the next token after prefilling. place_in_prompt: PlaceInPrompt The place of the input message in the prompt. See `class PlaceInPrompt` for details. """ self._prefill_func(input, decode_next_token, place_in_prompt.value)
Run prefill stage for a given input and optionally decode the first output token. User can decide where to place the input in the prompt. Parameters ---------- input : str The user input string. decode_next_token : bool Whether to decode the next token after prefilling. place_in_prompt: PlaceInPrompt The place of the input message in the prompt. See `class PlaceInPrompt` for details.
_prefill
python
llSourcell/Doctor-Dignity
python/mlc_chat/chat_module.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/chat_module.py
Apache-2.0
def _get_all_available_models_under_dir(artifact_path: str) -> Dict[str, str]: r"""Given the artifact path storing all models, returns a dict mapping available model names to the correct `model` args passed into ChatModule. Note ---- We only search for folders under the artifact_path, without recursive search for subfolders. For each folder, we count it as a valid MLC model folder if either it contains a `mlc-chat-config.json` file, or it contains a `params` folder which contains a `mlc-chat-config.json` file. We will map the name of a valid folder to its full path to the folder containing `mlc-chat-config.json`. """ # step 0. retrieve the absolute path of artifact_path search_dir = os.path.abspath(artifact_path) if not os.path.exists(search_dir): err_msg = ( f"The artifact path {artifact_path} you provided is neither a valid full path nor a valid path ", "relative to the current working directory. Please provide a correct artifact path.", ) raise FileNotFoundError(err_msg) # step 1. go through all the folders, build the model dict model_dict = {} for path in glob.glob(os.path.join(search_dir, "*")): if os.path.isdir(path): model_name = os.path.basename(os.path.normpath(path)) # check if it contains `mlc-chat-config.json` if os.path.exists(os.path.join(path, "mlc-chat-config.json")): model_dict[model_name] = os.path.abspath(path) # check if it contains `params/mlc-chat-config.json` elif os.path.exists(os.path.join(path, "params", "mlc-chat-config.json")): model_dict[model_name] = os.path.abspath(os.path.join(path, "params")) return model_dict
Given the artifact path storing all models, returns a dict mapping available model names to the correct `model` args passed into ChatModule. Note ---- We only search for folders under the artifact_path, without recursive search for subfolders. For each folder, we count it as a valid MLC model folder if either it contains a `mlc-chat-config.json` file, or it contains a `params` folder which contains a `mlc-chat-config.json` file. We will map the name of a valid folder to its full path to the folder containing `mlc-chat-config.json`.
_get_all_available_models_under_dir
python
llSourcell/Doctor-Dignity
python/mlc_chat/gradio.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/gradio.py
Apache-2.0
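An illustrative call of the helper above, assuming the conventional `dist/` layout in which each compiled model folder holds `mlc-chat-config.json` either directly or under a `params/` subfolder:

# Prints each valid model folder and the directory containing its mlc-chat-config.json.
model_dict = _get_all_available_models_under_dir("dist")
for model_name, config_dir in model_dict.items():
    print(f"{model_name} -> {config_dir}")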
def gradio_reload_model(self, model_name: str): r"""Reload the model given the user-selected model name.""" self.chat_mod = ChatModule(self.model_dict[model_name], self.device_str) updated_dict = { "chatbot": None, "chat_state": [], "img_list": [], "image_model": gr.update(interactive=False, visible=False), "stream_interval": gr.update(interactive=True, visible=True), "reset_llm_button": gr.update(interactive=True, visible=True), "stats_button": gr.update(interactive=True, visible=True), "stats_output": gr.update(placeholder="Click to get runtime statistics.", visible=True), "text_input": gr.update(interactive=True, placeholder="Type and press enter"), } return list(updated_dict.values())
Reload the model given the user-selected model name.
gradio_reload_model
python
llSourcell/Doctor-Dignity
python/mlc_chat/gradio.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/gradio.py
Apache-2.0
def gradio_ask(self, text_input, chatbot): r"""Display user text input in the chatbot.""" chatbot = chatbot + [[text_input, None]] text_input = "" return text_input, chatbot
Display user text input in the chatbot.
gradio_ask
python
llSourcell/Doctor-Dignity
python/mlc_chat/gradio.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/gradio.py
Apache-2.0
def gradio_answer(self, chatbot, stream_interval): r"""Generate and display the chat module's response. Note: Below is a low-level implementation of generate() API, since it's easier to yield without delta callback.""" prompt = chatbot[-1][0] self.chat_mod._prefill(prompt) i, new_msg = 0, "" while not self.chat_mod._stopped(): self.chat_mod._decode() if i % stream_interval == 0 or self.chat_mod._stopped(): new_msg = self.chat_mod._get_message() chatbot[-1][1] = new_msg yield chatbot i += 1
Generate and display the chat module's response. Note: Below is a low-level implementation of generate() API, since it's easier to yield without delta callback.
gradio_answer
python
llSourcell/Doctor-Dignity
python/mlc_chat/gradio.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/gradio.py
Apache-2.0
def launch_gradio( artifact_path: str = "dist", device: str = "auto", port: int = 7860, share: bool = False, host: str = "127.0.0.1"): r"""Launch the gradio interface with a given port, creating a publically sharable link if specified.""" # create a gradio module mod = GradioModule(artifact_path, device) title = """<h1 align="center">MLC Chat Gradio Interface</h1>""" description = ( """<h3>Welcome to MLC Chat! Pick a model from your local ids to get started.</h3>""" ) with gr.Blocks() as demo: gr.Markdown(title) gr.Markdown(description) # ---------------------- user interface design ------------------------- with gr.Row(): with gr.Column(scale=0.3): llm_model = gr.Dropdown(list(mod.model_dict.keys()), label="Language Model") image_model = gr.Dropdown( ["-None-"], label="Do you wanna add an image model?", visible=False, interactive=False, ) image = gr.Image(type="pil", interactive=False, visible=False) stream_interval = gr.Slider( minimum=1.0, maximum=5.0, value=2.0, step=1.0, interactive=True, visible=False, label="Stream Interval", ) reset_llm_button = gr.Button("Reset chat", visible=False, interactive=False) stats_button = gr.Button("Get Runtime Statistics", interactive=False, visible=False) stats_output = gr.Textbox( show_label=False, placeholder="Click to get runtime statistics.", interactive=False, visible=False, container=False, ) with gr.Column(): chat_state = gr.State() img_list = gr.State() chatbot = gr.Chatbot(label="MLC Chat") text_input = gr.Textbox( show_label=False, placeholder="Select a model to start chatting!", interactive=False, container=False, ) # ---------------------- local variables --------------------------- # type 1. buttons whose visibility change when llm reload llm_buttons = [ image_model, stream_interval, reset_llm_button, stats_button, stats_output, text_input, ] # type 2. buttons whose visibility change when image model reload # pylint: disable=unused-variable image_model_buttons = [image, text_input] # type 3. chatbot state variables chatbot_vars = [chatbot, chat_state, img_list] # -------------------------- handle control -------------------------- llm_model.change( mod.gradio_reload_model, [llm_model], chatbot_vars + llm_buttons, queue=False ) text_input.submit(mod.gradio_ask, [text_input, chatbot], [text_input, chatbot]).then( mod.gradio_answer, [chatbot, stream_interval], [chatbot] ) reset_llm_button.click(mod.gradio_reset_model, [], chatbot_vars + [text_input]) stats_button.click(mod.gradio_stats, [], [stats_output]) # launch to the web demo.launch(share=share, enable_queue=True, server_port=port,server_name=host)
Launch the gradio interface with a given port, creating a publically sharable link if specified.
launch_gradio
python
llSourcell/Doctor-Dignity
python/mlc_chat/gradio.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/gradio.py
Apache-2.0
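A short usage sketch for `launch_gradio` above, serving the demo on the default port with a public share link; the `dist` folder is assumed to contain compiled model folders as described in `_get_all_available_models_under_dir`:

# Scans ./dist for compiled models, then serves the gradio UI on port 7860.
launch_gradio(artifact_path="dist", device="auto", port=7860, share=True)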
def get_dll_directories(): """Get extra mlc llm dll directories""" curr_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__))) source_dir = os.path.abspath(os.path.join(curr_dir, "..", "..")) dll_path = [ curr_dir, os.path.join(source_dir, "build"), os.path.join(source_dir, "build", "Release"), ] if "MLC_LIBRARY_PATH" in os.environ: dll_path.append(os.environ["MLC_LIBRARY_PATH"]) if "CONDA_PREFIX" in os.environ: dll_path.append(os.path.join(os.environ["CONDA_PREFIX"], "lib")) if sys.platform.startswith("linux") or sys.platform.startswith("freebsd"): dll_path.extend(get_env_paths("LD_LIBRARY_PATH", ":")) elif sys.platform.startswith("darwin"): dll_path.extend(get_env_paths("DYLD_LIBRARY_PATH", ":")) elif sys.platform.startswith("win32"): dll_path.extend(get_env_paths("PATH", ";")) return [os.path.abspath(p) for p in dll_path if os.path.isdir(p)]
Get extra mlc llm dll directories
get_dll_directories
python
llSourcell/Doctor-Dignity
python/mlc_chat/libinfo.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/libinfo.py
Apache-2.0
def find_lib_path(name, optional=False): """Find mlc llm library Parameters ---------- name : str The name of the library optional: boolean Whether the library is required """ if sys.platform.startswith("linux") or sys.platform.startswith("freebsd"): lib_name = f"lib{name}.so" elif sys.platform.startswith("win32"): lib_name = f"{name}.dll" elif sys.platform.startswith("darwin"): lib_name = f"lib{name}.dylib" else: lib_name = f"lib{name}.so" dll_paths = get_dll_directories() lib_dll_path = [os.path.join(p, lib_name) for p in dll_paths] lib_found = [p for p in lib_dll_path if os.path.exists(p) and os.path.isfile(p)] if not lib_found: if not optional: message = ( f"Cannot find libraries: {lib_name}\n" + "List of candidates:\n" + "\n".join(lib_dll_path) ) raise RuntimeError(message) return lib_found
Find mlc llm library Parameters ---------- name : str The name of the library optional: boolean Whether the library is required
find_lib_path
python
llSourcell/Doctor-Dignity
python/mlc_chat/libinfo.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/libinfo.py
Apache-2.0
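A hedged usage sketch for `find_lib_path` above; the library name "mlc_llm" is an assumption about how the runtime shared library is named, and `MLC_LIBRARY_PATH` can be exported to add a non-standard install location to the search:

# Look up the runtime library without raising if it is missing.
lib_candidates = find_lib_path("mlc_llm", optional=True)
if lib_candidates:
    print("Using runtime library:", lib_candidates[0])
else:
    print("No runtime library found in the searched directories")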
def convert_args_to_argparser() -> argparse.ArgumentParser: """Convert from RestAPIArgs to an equivalent ArgumentParser.""" args = argparse.ArgumentParser("MLC Chat REST API") for field in fields(RestAPIArgs): name = field.name.replace("_", "-") field_name = f"--{name}" # `kwargs` contains `help`, `choices`, and `action` kwargs = field.metadata.copy() if field.type == bool: # boolean arguments do not need to specify `type` args.add_argument(field_name, default=field.default, **kwargs) else: args.add_argument(field_name, type=field.type, default=field.default, **kwargs) return args
Convert from RestAPIArgs to an equivalent ArgumentParser.
convert_args_to_argparser
python
llSourcell/Doctor-Dignity
python/mlc_chat/rest.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/rest.py
Apache-2.0
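A self-contained sketch of the dataclass-to-argparse pattern used by `convert_args_to_argparser`; `DemoArgs` is a hypothetical stand-in and does not reflect the real fields of `RestAPIArgs`:

import argparse
from dataclasses import dataclass, field, fields

@dataclass
class DemoArgs:
    host: str = field(default="127.0.0.1", metadata={"help": "host to bind"})
    debug: bool = field(default=False, metadata={"help": "enable debug logging", "action": "store_true"})

parser = argparse.ArgumentParser("demo")
for f in fields(DemoArgs):
    flag = "--" + f.name.replace("_", "-")
    kwargs = dict(f.metadata)
    if f.type == bool:
        # boolean flags rely on the `action` stored in metadata instead of `type`
        parser.add_argument(flag, default=f.default, **kwargs)
    else:
        parser.add_argument(flag, type=f.type, default=f.default, **kwargs)

print(parser.parse_args(["--host", "0.0.0.0", "--debug"]))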
async def request_completion(request: ChatCompletionRequest): """ Creates model response for the given chat conversation. """ if len(request.messages) > 1: raise ValueError( """ The /v1/chat/completions endpoint currently only supports single message prompts. Please ensure your request contains only one message """) if request.stream: session["chat_mod"]._prefill(input=request.messages[0].content) async def iter_response(): prev_txt = "" async for content in AsyncChatCompletionStream(): if content: chunk = ChatCompletionStreamResponse( choices=[ ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage( role="assistant", content=content[len(prev_txt) :] ), finish_reason="stop", ) ] ) prev_txt = content yield f"data: {chunk.json(exclude_unset=True)}\n\n" return StreamingResponse(iter_response(), media_type="text/event-stream") else: msg = session["chat_mod"].generate(prompt=request.messages[0].content) return ChatCompletionResponse( choices=[ ChatCompletionResponseChoice( index=0, message=ChatMessage(role="assistant", content=msg), finish_reason="stop", ) ], # TODO: Fill in correct usage info usage=UsageInfo(prompt_tokens=0, completion_tokens=0, total_tokens=0), )
Creates model response for the given chat conversation.
request_completion
python
llSourcell/Doctor-Dignity
python/mlc_chat/rest.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/rest.py
Apache-2.0
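A hedged client-side sketch for the endpoint above. The local port 8000 and the exact response shape are assumptions; note that the handler only accepts a single message per request:

import requests

resp = requests.post(
    "http://127.0.0.1:8000/v1/chat/completions",
    json={"messages": [{"role": "user", "content": "What is MLC LLM?"}], "stream": False},
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])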
async def reset(): """ Reset the chat for the currently initialized model. """ session["chat_mod"].reset_chat()
Reset the chat for the currently initialized model.
reset
python
llSourcell/Doctor-Dignity
python/mlc_chat/rest.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/rest.py
Apache-2.0
def _chunk_tokens(self, texts: Sequence[str]) -> Tuple[List[List], List[int]]: """Tokenize and chunk texts to fit in the model's context window.""" if not self.embedding_ctx_length: raise ValueError( "embedding_ctx_length must be defined to use _get_len_safe_embeddings." ) try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to for OpenAIEmbeddings. " "Please install it with `pip install tiktoken`." ) tokens = [] indices = [] model_name = self.tiktoken_model_name or self.model try: encoding = tiktoken.encoding_for_model(model_name) except KeyError: logger.warning("Warning: model not found. Using cl100k_base encoding.") model = "cl100k_base" encoding = tiktoken.get_encoding(model) for i, text in enumerate(texts): if self.model.endswith("001"): # See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500 # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") token = encoding.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, ) for j in range(0, len(token), self.embedding_ctx_length): tokens.append(token[j : j + self.embedding_ctx_length]) indices.append(i) return tokens, indices
Tokenize and chunk texts to fit in the model's context window.
_chunk_tokens
python
llSourcell/Doctor-Dignity
python/mlc_chat/embeddings/openai.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/embeddings/openai.py
Apache-2.0
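A standalone sketch of the chunking idea in `_chunk_tokens`: encode with tiktoken and slice the token ids into fixed-size windows. The 256-token window and the `cl100k_base` encoding are arbitrary choices for illustration (requires `pip install tiktoken`):

import tiktoken

def chunk_tokens(text: str, ctx_length: int = 256):
    # Split the encoded token ids into windows of at most ctx_length tokens.
    encoding = tiktoken.get_encoding("cl100k_base")
    token_ids = encoding.encode(text)
    return [token_ids[i : i + ctx_length] for i in range(0, len(token_ids), ctx_length)]

chunks = chunk_tokens("a long document " * 500)
print(len(chunks), [len(c) for c in chunks])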
def embed_documents( self, texts: List[str], chunk_size: Optional[int] = None ) -> List[List[float]]: """Call out to OpenAI's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ # NOTE: to keep things simple, as long as the embedding_ctx_length is defined, # we assume the list may contain texts longer than the maximum context and # use length-safe embedding function. if self.embedding_ctx_length: return self._get_len_safe_embeddings( texts, engine=self.deployment, chunk_size=chunk_size ) embeddings = self._batch_embed(texts, chunk_size=chunk_size) return [(np.array(e) / np.linalg.norm(e)).tolist() for e in embeddings]
Call out to OpenAI's embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text.
embed_documents
python
llSourcell/Doctor-Dignity
python/mlc_chat/embeddings/openai.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/embeddings/openai.py
Apache-2.0
async def aembed_documents( self, texts: List[str], chunk_size: Optional[int] = 0 ) -> List[List[float]]: """Call out to OpenAI's embedding endpoint async for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text. """ # NOTE: to keep things simple, as long as the embedding_ctx_length is defined, # we assume the list may contain texts longer than the maximum context and # use length-safe embedding function. if self.embedding_ctx_length: return await self._aget_len_safe_embeddings(texts, engine=self.deployment) embeddings = await self._abatch_embed(texts, chunk_size=chunk_size) return [(np.array(e) / np.linalg.norm(e)).tolist() for e in embeddings]
Call out to OpenAI's embedding endpoint async for embedding search docs. Args: texts: The list of texts to embed. chunk_size: The chunk size of embeddings. If None, will use the chunk size specified by the class. Returns: List of embeddings, one for each text.
aembed_documents
python
llSourcell/Doctor-Dignity
python/mlc_chat/embeddings/openai.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/embeddings/openai.py
Apache-2.0
async def aembed_query(self, text: str) -> List[float]: """Call out to OpenAI's embedding endpoint async for embedding query text. Args: text: The text to embed. Returns: Embedding for the text. """ embeddings = await self.aembed_documents([text]) return embeddings[0]
Call out to OpenAI's embedding endpoint async for embedding query text. Args: text: The text to embed. Returns: Embedding for the text.
aembed_query
python
llSourcell/Doctor-Dignity
python/mlc_chat/embeddings/openai.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/python/mlc_chat/embeddings/openai.py
Apache-2.0
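A hedged sketch of driving the async embedding helpers above with asyncio; `embedder` stands in for an instance of this embeddings class, and its construction (model, endpoint, context length) is deliberately left out:

import asyncio

async def embed_corpus(embedder):
    doc_vectors = await embedder.aembed_documents(
        ["MLC LLM compiles models for local devices.", "Bananas are yellow."]
    )
    query_vector = await embedder.aembed_query("What color are bananas?")
    return doc_vectors, query_vector

# asyncio.run(embed_corpus(embedder))  # run once an embedder instance exists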
def old_make_args(): """The exact old way of creating `ArgumentParser`, used to test whether `BuildArgs` is equivalent to this. """ args = argparse.ArgumentParser() args.add_argument( "--model", type=str, default="auto", help=( 'The name of the model to build. If it is "auto", we will ' 'automatically set the model name according to "--model-path", ' '"hf-path" or the model folders under "--artifact-path/models"' ) ) args.add_argument( "--hf-path", type=str, default=None, help="Hugging Face path from which to download params, tokenizer, and config", ) args.add_argument( "--quantization", type=str, choices=[*utils.quantization_schemes.keys()], default=list(utils.quantization_schemes.keys())[0], help="The quantization mode we use to compile." ) args.add_argument( "--max-seq-len", type=int, default=-1, help="The maximum allowed sequence length for the model." ) args.add_argument( "--target", type=str, default="auto", help="The target platform to compile the model for." ) args.add_argument( "--reuse-lib", type=str, default=None, help="Whether to reuse a previously generated lib.", ) args.add_argument( "--artifact-path", type=str, default="dist", help="Where to store the output." ) args.add_argument( "--use-cache", type=int, default=1, help="Whether to use previously pickled IRModule and skip trace.", ) args.add_argument( "--debug-dump", action="store_true", default=False, help="Whether to dump debugging files during compilation." ) args.add_argument( "--debug-load-script", action="store_true", default=False, help="Whether to load the script for debugging." ) args.add_argument( "--llvm-mingw", type=str, default="", help="/path/to/llvm-mingw-root, use llvm-mingw to cross compile to windows.", ) args.add_argument( "--system-lib", action="store_true", default=False, help="A parameter to `relax.build`." ) args.add_argument( "--sep-embed", action="store_true", default=False, help=( "Build with separated embedding layer, only applicable to LlaMa. " "This feature is in testing stage, and will be formally replaced after " "massive overhaul of embedding feature for all models and use cases" ), ) return args
The exact old way of creating `ArgumentParser`, used to test whether `BuildArgs` is equivalent to this.
old_make_args
python
llSourcell/Doctor-Dignity
tests/python/test_build_args.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/tests/python/test_build_args.py
Apache-2.0
def argparsers_equal(self, parse_a: argparse.ArgumentParser, parse_b: argparse.ArgumentParser): """ Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances. """ self.assertEqual(len(parse_a._actions), len(parse_b._actions)) # pylint: disable=protected-access for x, y in zip(parse_a._actions, parse_b._actions): # pylint: disable=protected-access xx = {k: v for k, v in vars(x).items() if k != "container"} yy = {k: v for k, v in vars(y).items() if k != "container"} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("choices", None) and yy.get("choices", None): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice)) del xx["type"], yy["type"] self.assertEqual(xx, yy)
Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
argparsers_equal
python
llSourcell/Doctor-Dignity
tests/python/test_build_args.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/tests/python/test_build_args.py
Apache-2.0
def test_namespaces_are_equivalent_str(self): """Tests whether the resulting namespaces from command line entry and Python API entry are equivalent, as they are passed down to the same workflow.""" # Namespace that would be created through Python API build_model build_args = BuildArgs(model="RedPJ", target="cuda") build_args_as_dict = dataclasses.asdict(build_args) build_args_namespace = argparse.Namespace(**build_args_as_dict) # Namespace that would be created through commandline empty_args = core.convert_build_args_to_argparser() parsed_args = empty_args.parse_args(["--model", "RedPJ", "--target", "cuda"]) self.assertEqual(build_args_namespace, parsed_args) # Modify build_args so that it would not be equivalent build_args = BuildArgs(model="RedPJ", target="vulkan") build_args_as_dict = dataclasses.asdict(build_args) build_args_namespace = argparse.Namespace(**build_args_as_dict) self.assertNotEqual(build_args_namespace, parsed_args)
Tests whether the resulting namespaces from command line entry and Python API entry are equivalent, as they are passed down to the same workflow.
test_namespaces_are_equivalent_str
python
llSourcell/Doctor-Dignity
tests/python/test_build_args.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/tests/python/test_build_args.py
Apache-2.0
def test_namespaces_are_equivalent_str_boolean_int(self): """Same test, but for a mixture of argument types.""" # 1. Equal build_args = BuildArgs(model="RedPJ", max_seq_len=20, debug_dump=True) build_args_as_dict = dataclasses.asdict(build_args) build_args_namespace = argparse.Namespace(**build_args_as_dict) # Namespace that would be created through commandline empty_args = core.convert_build_args_to_argparser() parsed_args = empty_args.parse_args( ["--model", "RedPJ", "--max-seq-len", "20", "--debug-dump"] ) self.assertEqual(build_args_namespace, parsed_args) # 2. Not equal - missing boolean build_args = BuildArgs(model="RedPJ", max_seq_len=20) build_args_as_dict = dataclasses.asdict(build_args) build_args_namespace = argparse.Namespace(**build_args_as_dict) self.assertNotEqual(build_args_namespace, parsed_args) # 3. Not equal - different integer build_args = BuildArgs(model="RedPJ", max_seq_len=18, debug_dump=True) build_args_as_dict = dataclasses.asdict(build_args) build_args_namespace = argparse.Namespace(**build_args_as_dict) self.assertNotEqual(build_args_namespace, parsed_args)
Same test, but for a mixture of argument types.
test_namespaces_are_equivalent_str_boolean_int
python
llSourcell/Doctor-Dignity
tests/python/test_build_args.py
https://github.com/llSourcell/Doctor-Dignity/blob/master/tests/python/test_build_args.py
Apache-2.0
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): """Set up the miio fan device from config.""" if DATA_KEY not in hass.data: hass.data[DATA_KEY] = {} host = config[CONF_HOST] name = config[CONF_NAME] token = config[CONF_TOKEN] model = config.get(CONF_MODEL) _LOGGER.info("Initializing with host %s (token %s...)", host, token[:5]) unique_id = None if model is None: miio_device = Device(host, token) try: device_info = miio_device.info() except DeviceException: raise PlatformNotReady model = device_info.model unique_id = "{}-{}".format(model, device_info.mac_address) _LOGGER.info( "%s %s %s detected", model, device_info.firmware_version, device_info.hardware_version, ) if model.startswith("nwt.derh."): air_dehumidifier = AirDehumidifier(host, token, model=model) device = XiaomiAirDehumidifier(name, air_dehumidifier, model, unique_id) else: _LOGGER.error( "Unsupported device found! Please create an issue at " "https://github.com/rytilahti/python-miio/issues " "and provide the following data: %s", model, ) return False hass.data[DATA_KEY][host] = device async_add_entities([device], update_before_add=True) async def async_service_handler(service): """Map services to methods on XiaomiAirDehumidifier.""" method = SERVICE_TO_METHOD.get(service.service) params = { key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID } entity_ids = service.data.get(ATTR_ENTITY_ID) if entity_ids: devices = [ device for device in hass.data[DATA_KEY].values() if device.entity_id in entity_ids ] else: devices = hass.data[DATA_KEY].values() update_tasks = [] for device in devices: if not hasattr(device, method["method"]): continue await getattr(device, method["method"])(**params) update_tasks.append(asyncio.create_task(device.async_update_ha_state(True))) if update_tasks: await asyncio.wait(update_tasks) for air_dehumidifier_service in SERVICE_TO_METHOD: schema = SERVICE_TO_METHOD[air_dehumidifier_service].get( "schema", AIRDEHUMIDIFIER_SERVICE_SCHEMA ) hass.services.async_register( DOMAIN, air_dehumidifier_service, async_service_handler, schema=schema )
Set up the miio fan device from config.
async_setup_platform
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/climate.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/climate.py
Apache-2.0
async def async_service_handler(service): """Map services to methods on XiaomiAirDehumidifier.""" method = SERVICE_TO_METHOD.get(service.service) params = { key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID } entity_ids = service.data.get(ATTR_ENTITY_ID) if entity_ids: devices = [ device for device in hass.data[DATA_KEY].values() if device.entity_id in entity_ids ] else: devices = hass.data[DATA_KEY].values() update_tasks = [] for device in devices: if not hasattr(device, method["method"]): continue await getattr(device, method["method"])(**params) update_tasks.append(asyncio.create_task(device.async_update_ha_state(True))) if update_tasks: await asyncio.wait(update_tasks)
Map services to methods on XiaomiAirDehumidifier.
async_service_handler
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/climate.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/climate.py
Apache-2.0
async def _try_command(self, mask_error, func, *args, **kwargs): """Call a miio device command handling error messages.""" try: result = await self.hass.async_add_executor_job( partial(func, *args, **kwargs) ) except DeviceException as exc: _LOGGER.error(mask_error, exc) self._available = False return False _LOGGER.debug("Response received from miio device: %s", result) return result == SUCCESS
Call a miio device command handling error messages.
_try_command
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/climate.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/climate.py
Apache-2.0
def hvac_mode(self): """Return hvac operation ie. heat, cool mode.""" if self.is_on: return HVACMode.DRY return HVACMode.OFF
Return hvac operation ie. heat, cool mode.
hvac_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/climate.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/climate.py
Apache-2.0
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the miio fan device from config."""
    if DATA_KEY not in hass.data:
        hass.data[DATA_KEY] = {}

    host = config[CONF_HOST]
    token = config[CONF_TOKEN]
    name = config[CONF_NAME]
    model = config.get(CONF_MODEL)
    retries = config[CONF_RETRIES]

    _LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
    unique_id = None

    if model is None:
        try:
            miio_device = Device(host, token)
            device_info = await hass.async_add_executor_job(miio_device.info)
            model = device_info.model
            unique_id = f"{model}-{device_info.mac_address}"
            _LOGGER.info(
                "%s %s %s detected",
                model,
                device_info.firmware_version,
                device_info.hardware_version,
            )
        except DeviceException as ex:
            raise PlatformNotReady from ex

    if model in PURIFIER_MIOT:
        air_purifier = AirPurifierMiot(host, token)
        device = XiaomiAirPurifierMiot(name, air_purifier, model, unique_id, retries)
    elif model.startswith("zhimi.airpurifier."):
        air_purifier = AirPurifier(host, token)
        device = XiaomiAirPurifier(name, air_purifier, model, unique_id)
    elif model in HUMIDIFIER_MIOT:
        air_humidifier = AirHumidifierMiot(host, token)
        device = XiaomiAirHumidifierMiot(name, air_humidifier, model, unique_id)
    elif model.startswith("zhimi.humidifier."):
        air_humidifier = AirHumidifier(host, token, model=model)
        device = XiaomiAirHumidifier(name, air_humidifier, model, unique_id)
    elif model in [
        MODEL_AIRHUMIDIFIER_MJJSQ,
        MODEL_AIRHUMIDIFIER_JSQ,
        MODEL_AIRHUMIDIFIER_JSQ1,
    ]:
        air_humidifier = AirHumidifierMjjsq(host, token, model=model)
        device = XiaomiAirHumidifierMjjsq(name, air_humidifier, model, unique_id)
    elif model in [
        MODEL_AIRHUMIDIFIER_JSQ2W,
        MODEL_AIRHUMIDIFIER_JSQ3,
        MODEL_AIRHUMIDIFIER_JSQ5,
        MODEL_AIRHUMIDIFIER_JSQS,
    ]:
        air_humidifier = AirHumidifierJsqs(host, token, model=model)
        device = XiaomiAirHumidifierJsqs(name, air_humidifier, model, unique_id)
    elif model == MODEL_AIRHUMIDIFIER_JSQ001:
        air_humidifier = AirHumidifierJsq(host, token, model=model)
        device = XiaomiAirHumidifierJsq(name, air_humidifier, model, unique_id)
    elif model.startswith("zhimi.airfresh."):
        air_fresh = AirFresh(host, token, model=model)
        device = XiaomiAirFresh(name, air_fresh, model, unique_id)
    elif model == MODEL_AIRFRESH_A1:
        air_fresh = AirFreshA1(host, token, model=model)
        device = XiaomiAirFreshA1(name, air_fresh, model, unique_id)
    elif model == MODEL_AIRFRESH_T2017:
        air_fresh = AirFreshT2017(host, token, model=model)
        device = XiaomiAirFreshT2017(name, air_fresh, model, unique_id)
    elif model in [
        MODEL_FAN_V2,
        MODEL_FAN_V3,
        MODEL_FAN_SA1,
        MODEL_FAN_ZA1,
        MODEL_FAN_ZA3,
        MODEL_FAN_ZA4,
    ]:
        fan = Fan(host, token, model=model)
        device = XiaomiFan(name, fan, model, unique_id, retries)
    elif model == MODEL_FAN_P5:
        fan = FanP5(host, token, model=model)
        device = XiaomiFanP5(name, fan, model, unique_id, retries)
    elif model in [MODEL_FAN_P10, MODEL_FAN_P18]:
        fan = FanMiot(host, token, model=MODEL_FAN_P10)
        device = XiaomiFanMiot(name, fan, model, unique_id, retries)
    elif model in [MODEL_FAN_P9, MODEL_FAN_P11]:
        fan = FanMiot(host, token, model=model)
        device = XiaomiFanMiot(name, fan, model, unique_id, retries)
    elif model == MODEL_FAN_LESHOW_SS4:
        fan = FanLeshow(host, token, model=model)
        device = XiaomiFanLeshow(name, fan, model, unique_id, retries)
    elif model in [
        MODEL_AIRPURIFIER_AIRDOG_X3,
        MODEL_AIRPURIFIER_AIRDOG_X5,
        MODEL_AIRPURIFIER_AIRDOG_X7SM,
    ]:
        air_purifier = AirDogX3(host, token, model=model)
        device = XiaomiAirDog(name, air_purifier, model, unique_id, retries)
    elif model in [MODEL_FAN_1C, MODEL_FAN_P8]:
        fan = Fan1C(host, token, model=model)
        device = XiaomiFan1C(name, fan, model, unique_id, retries)
    else:
        _LOGGER.error(
            "Unsupported device found! Please create an issue at "
            "https://github.com/syssi/xiaomi_airpurifier/issues "
            "and provide the following data: %s",
            model,
        )
        return False

    hass.data[DATA_KEY][host] = device
    async_add_entities([device], update_before_add=True)

    async def async_service_handler(service):
        """Map services to methods on XiaomiAirPurifier."""
        method = SERVICE_TO_METHOD.get(service.service)
        params = {
            key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID
        }
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        if entity_ids:
            devices = [
                device
                for device in hass.data[DATA_KEY].values()
                if device.entity_id in entity_ids
            ]
        else:
            devices = hass.data[DATA_KEY].values()

        update_tasks = []
        for device in devices:
            if not hasattr(device, method["method"]):
                continue
            await getattr(device, method["method"])(**params)
            update_tasks.append(asyncio.create_task(device.async_update_ha_state(True)))

        if update_tasks:
            await asyncio.wait(update_tasks)

    for air_purifier_service in SERVICE_TO_METHOD:
        schema = SERVICE_TO_METHOD[air_purifier_service].get(
            "schema", AIRPURIFIER_SERVICE_SCHEMA
        )
        hass.services.async_register(
            DOMAIN, air_purifier_service, async_service_handler, schema=schema
        )
Set up the miio fan device from config.
async_setup_platform
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_service_handler(service): """Map services to methods on XiaomiAirPurifier.""" method = SERVICE_TO_METHOD.get(service.service) params = { key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID } entity_ids = service.data.get(ATTR_ENTITY_ID) if entity_ids: devices = [ device for device in hass.data[DATA_KEY].values() if device.entity_id in entity_ids ] else: devices = hass.data[DATA_KEY].values() update_tasks = [] for device in devices: if not hasattr(device, method["method"]): continue await getattr(device, method["method"])(**params) update_tasks.append(asyncio.create_task(device.async_update_ha_state(True))) if update_tasks: await asyncio.wait(update_tasks)
Map services to methods on XiaomiAirPurifier.
async_service_handler
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def _try_command(self, mask_error, func, *args, **kwargs): """Call a miio device command handling error messages.""" try: result = await self.hass.async_add_executor_job( partial(func, *args, **kwargs) ) _LOGGER.debug("Response received from miio device: %s", result) return result == SUCCESS except DeviceException as exc: _LOGGER.error(mask_error, exc) self._available = False return False
Call a miio device command handling error messages.
_try_command
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_preset_mode(self, preset_mode: str) -> None: """Set the preset mode of the fan.""" _LOGGER.debug("Setting the preset mode to: %s", preset_mode) await self._try_command( "Setting preset mode of the miio device failed.", self._device.set_mode, AirpurifierOperationMode[preset_mode.title()], )
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_reset_filter(self): """Reset the filter lifetime and usage.""" if self._device_features & FEATURE_RESET_FILTER == 0: return await self._try_command( "Resetting the filter lifetime of the miio device failed.", self._device.reset_filter, )
Reset the filter lifetime and usage.
async_reset_filter
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_preset_mode(self, preset_mode: str) -> None: """Set the preset mode of the fan.""" _LOGGER.debug("Setting the preset mode to: %s", preset_mode) await self._try_command( "Setting preset mode of the miio device failed.", self._device.set_mode, AirpurifierMiotOperationMode[preset_mode.title()], )
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_preset_mode(self, preset_mode: str) -> None: """Set the preset mode of the fan.""" _LOGGER.debug("Setting the preset mode to: %s", preset_mode) await self._try_command( "Setting preset mode of the miio device failed.", self._device.set_mode, AirhumidifierOperationMode[preset_mode.title()], )
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0
async def async_set_preset_mode(self, preset_mode: str) -> None: """Set the preset mode of the fan.""" _LOGGER.debug("Setting the preset mode to: %s", preset_mode) await self._try_command( "Setting preset mode of the miio device failed.", self._device.set_mode, AirhumidifierMiotOperationMode[preset_mode.title()], )
Set the preset mode of the fan.
async_set_preset_mode
python
syssi/xiaomi_airpurifier
custom_components/xiaomi_miio_airpurifier/fan.py
https://github.com/syssi/xiaomi_airpurifier/blob/master/custom_components/xiaomi_miio_airpurifier/fan.py
Apache-2.0