sangche committed on
Commit 7aff9a8 · verified · 1 Parent(s): b33cec5

Upload 360 files

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. api_server/__init__.py +0 -0
  2. api_server/routes/__init__.py +0 -0
  3. api_server/routes/internal/README.md +3 -0
  4. api_server/routes/internal/__init__.py +0 -0
  5. api_server/routes/internal/internal_routes.py +75 -0
  6. api_server/services/__init__.py +0 -0
  7. api_server/services/file_service.py +13 -0
  8. api_server/services/terminal_service.py +60 -0
  9. api_server/utils/file_operations.py +42 -0
  10. app/__init__.py +0 -0
  11. app/app_settings.py +59 -0
  12. app/custom_node_manager.py +34 -0
  13. app/frontend_management.py +204 -0
  14. app/logger.py +84 -0
  15. app/model_manager.py +184 -0
  16. app/user_manager.py +330 -0
  17. bottle.py +233 -0
  18. comfy/checkpoint_pickle.py +13 -0
  19. comfy/cldm/cldm.py +433 -0
  20. comfy/cldm/control_types.py +10 -0
  21. comfy/cldm/dit_embedder.py +120 -0
  22. comfy/cldm/mmdit.py +81 -0
  23. comfy/cli_args.py +190 -0
  24. comfy/clip_config_bigg.json +23 -0
  25. comfy/clip_model.py +218 -0
  26. comfy/clip_vision.py +129 -0
  27. comfy/clip_vision_config_g.json +18 -0
  28. comfy/clip_vision_config_h.json +18 -0
  29. comfy/clip_vision_config_vitl.json +18 -0
  30. comfy/clip_vision_config_vitl_336.json +18 -0
  31. comfy/clip_vision_siglip_384.json +13 -0
  32. comfy/comfy_types/README.md +43 -0
  33. comfy/comfy_types/__init__.py +45 -0
  34. comfy/comfy_types/examples/example_nodes.py +28 -0
  35. comfy/comfy_types/examples/input_options.png +0 -0
  36. comfy/comfy_types/examples/input_types.png +0 -0
  37. comfy/comfy_types/examples/required_hint.png +0 -0
  38. comfy/comfy_types/node_typing.py +274 -0
  39. comfy/conds.py +83 -0
  40. comfy/controlnet.py +862 -0
  41. comfy/diffusers_convert.py +288 -0
  42. comfy/diffusers_load.py +36 -0
  43. comfy/extra_samplers/uni_pc.py +873 -0
  44. comfy/float.py +67 -0
  45. comfy/gligen.py +344 -0
  46. comfy/hooks.py +785 -0
  47. comfy/k_diffusion/deis.py +120 -0
  48. comfy/k_diffusion/sampling.py +1338 -0
  49. comfy/k_diffusion/utils.py +313 -0
  50. comfy/latent_formats.py +409 -0
api_server/__init__.py ADDED
File without changes
api_server/routes/__init__.py ADDED
File without changes
api_server/routes/internal/README.md ADDED
@@ -0,0 +1,3 @@
+ # ComfyUI Internal Routes
+
+ All routes under the `/internal` path are designated for **internal use by ComfyUI only**. These routes are not intended for use by external applications and may change at any time without notice.
api_server/routes/internal/__init__.py ADDED
File without changes
api_server/routes/internal/internal_routes.py ADDED
@@ -0,0 +1,75 @@
+ from aiohttp import web
+ from typing import Optional
+ from folder_paths import models_dir, user_directory, output_directory, folder_names_and_paths
+ from api_server.services.file_service import FileService
+ from api_server.services.terminal_service import TerminalService
+ import app.logger
+
+ class InternalRoutes:
+     '''
+     The top level web router for internal routes: /internal/*
+     The endpoints here should NOT be depended upon. They are for ComfyUI frontend use only.
+     Check README.md for more information.
+     '''
+
+     def __init__(self, prompt_server):
+         self.routes: web.RouteTableDef = web.RouteTableDef()
+         self._app: Optional[web.Application] = None
+         self.file_service = FileService({
+             "models": models_dir,
+             "user": user_directory,
+             "output": output_directory
+         })
+         self.prompt_server = prompt_server
+         self.terminal_service = TerminalService(prompt_server)
+
+     def setup_routes(self):
+         @self.routes.get('/files')
+         async def list_files(request):
+             directory_key = request.query.get('directory', '')
+             try:
+                 file_list = self.file_service.list_files(directory_key)
+                 return web.json_response({"files": file_list})
+             except ValueError as e:
+                 return web.json_response({"error": str(e)}, status=400)
+             except Exception as e:
+                 return web.json_response({"error": str(e)}, status=500)
+
+         @self.routes.get('/logs')
+         async def get_logs(request):
+             return web.json_response("".join([(l["t"] + " - " + l["m"]) for l in app.logger.get_logs()]))
+
+         @self.routes.get('/logs/raw')
+         async def get_raw_logs(request):
+             self.terminal_service.update_size()
+             return web.json_response({
+                 "entries": list(app.logger.get_logs()),
+                 "size": {"cols": self.terminal_service.cols, "rows": self.terminal_service.rows}
+             })
+
+         @self.routes.patch('/logs/subscribe')
+         async def subscribe_logs(request):
+             json_data = await request.json()
+             client_id = json_data["clientId"]
+             enabled = json_data["enabled"]
+             if enabled:
+                 self.terminal_service.subscribe(client_id)
+             else:
+                 self.terminal_service.unsubscribe(client_id)
+
+             return web.Response(status=200)
+
+
+         @self.routes.get('/folder_paths')
+         async def get_folder_paths(request):
+             response = {}
+             for key in folder_names_and_paths:
+                 response[key] = folder_names_and_paths[key][0]
+             return web.json_response(response)
+
+     def get_app(self):
+         if self._app is None:
+             self._app = web.Application()
+             self.setup_routes()
+             self._app.add_routes(self.routes)
+         return self._app
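
For orientation (not part of the commit): a minimal sketch of how this router could be mounted onto a parent aiohttp application; the `prompt_server` argument is a stand-in for the real PromptServer instance.

    from aiohttp import web
    from api_server.routes.internal.internal_routes import InternalRoutes

    def mount_internal_routes(app: web.Application, prompt_server) -> None:
        # get_app() lazily builds the sub-application and registers its routes,
        # so everything it serves lands beneath the /internal prefix.
        internal = InternalRoutes(prompt_server)
        app.add_subapp('/internal', internal.get_app())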
api_server/services/__init__.py ADDED
File without changes
api_server/services/file_service.py ADDED
@@ -0,0 +1,13 @@
+ from typing import Dict, List, Optional
+ from api_server.utils.file_operations import FileSystemOperations, FileSystemItem
+
+ class FileService:
+     def __init__(self, allowed_directories: Dict[str, str], file_system_ops: Optional[FileSystemOperations] = None):
+         self.allowed_directories: Dict[str, str] = allowed_directories
+         self.file_system_ops: FileSystemOperations = file_system_ops or FileSystemOperations()
+
+     def list_files(self, directory_key: str) -> List[FileSystemItem]:
+         if directory_key not in self.allowed_directories:
+             raise ValueError("Invalid directory key")
+         directory_path: str = self.allowed_directories[directory_key]
+         return self.file_system_ops.walk_directory(directory_path)
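
A short usage sketch (illustrative path, not from the commit): the key-to-directory map acts as a whitelist, so unknown keys fail fast with ValueError.

    from api_server.services.file_service import FileService

    service = FileService({"models": "/srv/comfy/models"})  # hypothetical path
    try:
        items = service.list_files("models")    # walks the whitelisted tree
    except ValueError:
        items = []                              # raised for any unknown key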
api_server/services/terminal_service.py ADDED
@@ -0,0 +1,60 @@
+ from app.logger import on_flush
+ import os
+ import shutil
+
+
+ class TerminalService:
+     def __init__(self, server):
+         self.server = server
+         self.cols = None
+         self.rows = None
+         self.subscriptions = set()
+         on_flush(self.send_messages)
+
+     def get_terminal_size(self):
+         try:
+             size = os.get_terminal_size()
+             return (size.columns, size.lines)
+         except OSError:
+             try:
+                 size = shutil.get_terminal_size()
+                 return (size.columns, size.lines)
+             except OSError:
+                 return (80, 24)  # fallback to 80x24
+
+     def update_size(self):
+         columns, lines = self.get_terminal_size()
+         changed = False
+
+         if columns != self.cols:
+             self.cols = columns
+             changed = True
+
+         if lines != self.rows:
+             self.rows = lines
+             changed = True
+
+         if changed:
+             return {"cols": self.cols, "rows": self.rows}
+
+         return None
+
+     def subscribe(self, client_id):
+         self.subscriptions.add(client_id)
+
+     def unsubscribe(self, client_id):
+         self.subscriptions.discard(client_id)
+
+     def send_messages(self, entries):
+         if not len(entries) or not len(self.subscriptions):
+             return
+
+         new_size = self.update_size()
+
+         for client_id in self.subscriptions.copy():  # prevent: Set changed size during iteration
+             if client_id not in self.server.sockets:
+                 # Automatically unsub if the socket has disconnected
+                 self.unsubscribe(client_id)
+                 continue
+
+             self.server.send_sync("logs", {"entries": entries, "size": new_size}, client_id)
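
To make the fan-out concrete, a sketch with a stub standing in for PromptServer (illustrative only): entries flushed from the log interceptors reach every subscribed, still-connected client, and stale clients are dropped.

    from api_server.services.terminal_service import TerminalService

    class StubServer:  # stand-in for PromptServer, for illustration only
        sockets = {"client-1": object()}   # pretend client-1 is connected
        def send_sync(self, event, data, sid):
            print(event, sid, data["size"], len(data["entries"]))

    svc = TerminalService(StubServer())
    svc.subscribe("client-1")
    svc.send_messages([{"t": "2024-01-01T00:00:00", "m": "hello\n"}])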
api_server/utils/file_operations.py ADDED
@@ -0,0 +1,42 @@
+ import os
+ from typing import List, Union, TypedDict, Literal
+ from typing_extensions import TypeGuard
+ class FileInfo(TypedDict):
+     name: str
+     path: str
+     type: Literal["file"]
+     size: int
+
+ class DirectoryInfo(TypedDict):
+     name: str
+     path: str
+     type: Literal["directory"]
+
+ FileSystemItem = Union[FileInfo, DirectoryInfo]
+
+ def is_file_info(item: FileSystemItem) -> TypeGuard[FileInfo]:
+     return item["type"] == "file"
+
+ class FileSystemOperations:
+     @staticmethod
+     def walk_directory(directory: str) -> List[FileSystemItem]:
+         file_list: List[FileSystemItem] = []
+         for root, dirs, files in os.walk(directory):
+             for name in files:
+                 file_path = os.path.join(root, name)
+                 relative_path = os.path.relpath(file_path, directory)
+                 file_list.append({
+                     "name": name,
+                     "path": relative_path,
+                     "type": "file",
+                     "size": os.path.getsize(file_path)
+                 })
+             for name in dirs:
+                 dir_path = os.path.join(root, name)
+                 relative_path = os.path.relpath(dir_path, directory)
+                 file_list.append({
+                     "name": name,
+                     "path": relative_path,
+                     "type": "directory"
+                 })
+         return file_list
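
Since is_file_info is a TypeGuard, type checkers narrow the FileSystemItem union after the check; a small sketch (any readable directory works as the argument):

    from api_server.utils.file_operations import FileSystemOperations, is_file_info

    items = FileSystemOperations.walk_directory("/tmp")  # hypothetical directory
    # After the guard, `item` is treated as FileInfo, so "size" is known to exist.
    total_bytes = sum(item["size"] for item in items if is_file_info(item))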
app/__init__.py ADDED
File without changes
app/app_settings.py ADDED
@@ -0,0 +1,59 @@
+ import os
+ import json
+ from aiohttp import web
+ import logging
+
+
+ class AppSettings():
+     def __init__(self, user_manager):
+         self.user_manager = user_manager
+
+     def get_settings(self, request):
+         file = self.user_manager.get_request_user_filepath(
+             request, "comfy.settings.json")
+         if os.path.isfile(file):
+             try:
+                 with open(file) as f:
+                     return json.load(f)
+             except:
+                 logging.error(f"The user settings file is corrupted: {file}")
+                 return {}
+         else:
+             return {}
+
+     def save_settings(self, request, settings):
+         file = self.user_manager.get_request_user_filepath(
+             request, "comfy.settings.json")
+         with open(file, "w") as f:
+             f.write(json.dumps(settings, indent=4))
+
+     def add_routes(self, routes):
+         @routes.get("/settings")
+         async def get_settings(request):
+             return web.json_response(self.get_settings(request))
+
+         @routes.get("/settings/{id}")
+         async def get_setting(request):
+             value = None
+             settings = self.get_settings(request)
+             setting_id = request.match_info.get("id", None)
+             if setting_id and setting_id in settings:
+                 value = settings[setting_id]
+             return web.json_response(value)
+
+         @routes.post("/settings")
+         async def post_settings(request):
+             settings = self.get_settings(request)
+             new_settings = await request.json()
+             self.save_settings(request, {**settings, **new_settings})
+             return web.Response(status=200)
+
+         @routes.post("/settings/{id}")
+         async def post_setting(request):
+             setting_id = request.match_info.get("id", None)
+             if not setting_id:
+                 return web.Response(status=400)
+             settings = self.get_settings(request)
+             settings[setting_id] = await request.json()
+             self.save_settings(request, settings)
+             return web.Response(status=200)
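
The routes compose into a simple read-merge-write API over comfy.settings.json; a hedged client-side sketch, assuming a server on ComfyUI's default port and a made-up setting id:

    import requests

    BASE = "http://127.0.0.1:8188"  # default ComfyUI port; adjust as needed
    # POST /settings/{id} stores the JSON body under that single key...
    requests.post(f"{BASE}/settings/Comfy.Example", json={"enabled": True})
    # ...while GET /settings returns the whole merged settings dict.
    print(requests.get(f"{BASE}/settings").json())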
app/custom_node_manager.py ADDED
@@ -0,0 +1,34 @@
+ from __future__ import annotations
+
+ import os
+ import folder_paths
+ import glob
+ from aiohttp import web
+
+ class CustomNodeManager:
+     """
+     Placeholder to refactor the custom node management features from ComfyUI-Manager.
+     Currently it only contains the custom workflow templates feature.
+     """
+     def add_routes(self, routes, webapp, loadedModules):
+
+         @routes.get("/workflow_templates")
+         async def get_workflow_templates(request):
+             """Returns a web response that contains the map of custom_nodes names and their associated workflow templates. The ones without templates are omitted."""
+             files = [
+                 file
+                 for folder in folder_paths.get_folder_paths("custom_nodes")
+                 for file in glob.glob(os.path.join(folder, '*/example_workflows/*.json'))
+             ]
+             workflow_templates_dict = {}  # custom_nodes folder name -> example workflow names
+             for file in files:
+                 custom_nodes_name = os.path.basename(os.path.dirname(os.path.dirname(file)))
+                 workflow_name = os.path.splitext(os.path.basename(file))[0]
+                 workflow_templates_dict.setdefault(custom_nodes_name, []).append(workflow_name)
+             return web.json_response(workflow_templates_dict)
+
+         # Serve workflow templates from custom nodes.
+         for module_name, module_dir in loadedModules:
+             workflows_dir = os.path.join(module_dir, 'example_workflows')
+             if os.path.exists(workflows_dir):
+                 webapp.add_routes([web.static('/api/workflow_templates/' + module_name, workflows_dir)])
app/frontend_management.py ADDED
@@ -0,0 +1,204 @@
+ from __future__ import annotations
+ import argparse
+ import logging
+ import os
+ import re
+ import tempfile
+ import zipfile
+ from dataclasses import dataclass
+ from functools import cached_property
+ from pathlib import Path
+ from typing import TypedDict, Optional
+
+ import requests
+ from typing_extensions import NotRequired
+ from comfy.cli_args import DEFAULT_VERSION_STRING
+
+
+ REQUEST_TIMEOUT = 10  # seconds
+
+
+ class Asset(TypedDict):
+     name: str  # download_release_asset_zip() matches assets by name
+     url: str
+
+
+ class Release(TypedDict):
+     id: int
+     tag_name: str
+     name: str
+     prerelease: bool
+     created_at: str
+     published_at: str
+     body: str
+     assets: NotRequired[list[Asset]]
+
+
+ @dataclass
+ class FrontEndProvider:
+     owner: str
+     repo: str
+
+     @property
+     def folder_name(self) -> str:
+         return f"{self.owner}_{self.repo}"
+
+     @property
+     def release_url(self) -> str:
+         return f"https://api.github.com/repos/{self.owner}/{self.repo}/releases"
+
+     @cached_property
+     def all_releases(self) -> list[Release]:
+         releases = []
+         api_url = self.release_url
+         while api_url:
+             response = requests.get(api_url, timeout=REQUEST_TIMEOUT)
+             response.raise_for_status()  # Raises an HTTPError if the response was an error
+             releases.extend(response.json())
+             # GitHub uses the Link header to provide pagination links. Check if it exists and update api_url accordingly.
+             if "next" in response.links:
+                 api_url = response.links["next"]["url"]
+             else:
+                 api_url = None
+         return releases
+
+     @cached_property
+     def latest_release(self) -> Release:
+         latest_release_url = f"{self.release_url}/latest"
+         response = requests.get(latest_release_url, timeout=REQUEST_TIMEOUT)
+         response.raise_for_status()  # Raises an HTTPError if the response was an error
+         return response.json()
+
+     def get_release(self, version: str) -> Release:
+         if version == "latest":
+             return self.latest_release
+         else:
+             for release in self.all_releases:
+                 if release["tag_name"] in [version, f"v{version}"]:
+                     return release
+             raise ValueError(f"Version {version} not found in releases")
+
+
+ def download_release_asset_zip(release: Release, destination_path: str) -> None:
+     """Download dist.zip from github release."""
+     asset_url = None
+     for asset in release.get("assets", []):
+         if asset["name"] == "dist.zip":
+             asset_url = asset["url"]
+             break
+
+     if not asset_url:
+         raise ValueError("dist.zip not found in the release assets")
+
+     # Use a temporary file to download the zip content
+     with tempfile.TemporaryFile() as tmp_file:
+         headers = {"Accept": "application/octet-stream"}
+         response = requests.get(
+             asset_url, headers=headers, allow_redirects=True, timeout=REQUEST_TIMEOUT
+         )
+         response.raise_for_status()  # Ensure we got a successful response
+
+         # Write the content to the temporary file
+         tmp_file.write(response.content)
+
+         # Go back to the beginning of the temporary file
+         tmp_file.seek(0)
+
+         # Extract the zip file content to the destination path
+         with zipfile.ZipFile(tmp_file, "r") as zip_ref:
+             zip_ref.extractall(destination_path)
+
+
+ class FrontendManager:
+     DEFAULT_FRONTEND_PATH = str(Path(__file__).parents[1] / "web")
+     CUSTOM_FRONTENDS_ROOT = str(Path(__file__).parents[1] / "web_custom_versions")
+
+     @classmethod
+     def parse_version_string(cls, value: str) -> tuple[str, str, str]:
+         """
+         Args:
+             value (str): The version string to parse.
+
+         Returns:
+             tuple[str, str, str]: A tuple containing the repo owner, repo name, and version.
+
+         Raises:
+             argparse.ArgumentTypeError: If the version string is invalid.
+         """
+         VERSION_PATTERN = r"^([a-zA-Z0-9][a-zA-Z0-9-]{0,38})/([a-zA-Z0-9_.-]+)@(v?\d+\.\d+\.\d+|latest)$"
+         match_result = re.match(VERSION_PATTERN, value)
+         if match_result is None:
+             raise argparse.ArgumentTypeError(f"Invalid version string: {value}")
+
+         return match_result.group(1), match_result.group(2), match_result.group(3)
+
+     @classmethod
+     def init_frontend_unsafe(cls, version_string: str, provider: Optional[FrontEndProvider] = None) -> str:
+         """
+         Initializes the frontend for the specified version.
+
+         Args:
+             version_string (str): The version string.
+             provider (FrontEndProvider, optional): The provider to use. Defaults to None.
+
+         Returns:
+             str: The path to the initialized frontend.
+
+         Raises:
+             Exception: If there is an error during the initialization process.
+                 The main error sources are request timeouts and invalid URLs.
+         """
+         if version_string == DEFAULT_VERSION_STRING:
+             return cls.DEFAULT_FRONTEND_PATH
+
+         repo_owner, repo_name, version = cls.parse_version_string(version_string)
+
+         if version.startswith("v"):
+             expected_path = str(Path(cls.CUSTOM_FRONTENDS_ROOT) / f"{repo_owner}_{repo_name}" / version.lstrip("v"))
+             if os.path.exists(expected_path):
+                 logging.info(f"Using existing copy of specific frontend version tag: {repo_owner}/{repo_name}@{version}")
+                 return expected_path
+
+         logging.info(f"Initializing frontend: {repo_owner}/{repo_name}@{version}, requesting version details from GitHub...")
+
+         provider = provider or FrontEndProvider(repo_owner, repo_name)
+         release = provider.get_release(version)
+
+         semantic_version = release["tag_name"].lstrip("v")
+         web_root = str(
+             Path(cls.CUSTOM_FRONTENDS_ROOT) / provider.folder_name / semantic_version
+         )
+         if not os.path.exists(web_root):
+             try:
+                 os.makedirs(web_root, exist_ok=True)
+                 logging.info(
+                     "Downloading frontend(%s) version(%s) to (%s)",
+                     provider.folder_name,
+                     semantic_version,
+                     web_root,
+                 )
+                 logging.debug(release)
+                 download_release_asset_zip(release, destination_path=web_root)
+             finally:
+                 # Clean up the directory if it is empty, i.e. the download failed
+                 if not os.listdir(web_root):
+                     os.rmdir(web_root)
+
+         return web_root
+
+     @classmethod
+     def init_frontend(cls, version_string: str) -> str:
+         """
+         Initializes the frontend with the specified version string.
+
+         Args:
+             version_string (str): The version string to initialize the frontend with.
+
+         Returns:
+             str: The path of the initialized frontend.
+         """
+         try:
+             return cls.init_frontend_unsafe(version_string)
+         except Exception as e:
+             logging.error("Failed to initialize frontend: %s", e)
+             logging.info("Falling back to the default frontend.")
+             return cls.DEFAULT_FRONTEND_PATH
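
A quick sketch of the version-string grammar parse_version_string accepts (`owner/repo@vX.Y.Z` or `owner/repo@latest`); the repository name below is only an example:

    from app.frontend_management import FrontendManager

    owner, repo, version = FrontendManager.parse_version_string(
        "Comfy-Org/ComfyUI_frontend@v1.2.3"   # "...@latest" also matches
    )
    # -> ("Comfy-Org", "ComfyUI_frontend", "v1.2.3"); malformed strings raise
    # argparse.ArgumentTypeError, which surfaces cleanly as a CLI error.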
app/logger.py ADDED
@@ -0,0 +1,84 @@
+ from collections import deque
+ from datetime import datetime
+ import io
+ import logging
+ import sys
+ import threading
+
+ logs = None
+ stdout_interceptor = None
+ stderr_interceptor = None
+
+
+ class LogInterceptor(io.TextIOWrapper):
+     def __init__(self, stream, *args, **kwargs):
+         buffer = stream.buffer
+         encoding = stream.encoding
+         super().__init__(buffer, *args, **kwargs, encoding=encoding, line_buffering=stream.line_buffering)
+         self._lock = threading.Lock()
+         self._flush_callbacks = []
+         self._logs_since_flush = []
+
+     def write(self, data):
+         entry = {"t": datetime.now().isoformat(), "m": data}
+         with self._lock:
+             self._logs_since_flush.append(entry)
+
+             # Simple handling for cr to overwrite the last output if it isn't a full line,
+             # else logs just get full of progress messages
+             if isinstance(data, str) and data.startswith("\r") and not logs[-1]["m"].endswith("\n"):
+                 logs.pop()
+             logs.append(entry)
+         super().write(data)
+
+     def flush(self):
+         super().flush()
+         for cb in self._flush_callbacks:
+             cb(self._logs_since_flush)
+         self._logs_since_flush = []
+
+     def on_flush(self, callback):
+         self._flush_callbacks.append(callback)
+
+
+ def get_logs():
+     return logs
+
+
+ def on_flush(callback):
+     if stdout_interceptor is not None:
+         stdout_interceptor.on_flush(callback)
+     if stderr_interceptor is not None:
+         stderr_interceptor.on_flush(callback)
+
+ def setup_logger(log_level: str = 'INFO', capacity: int = 300, use_stdout: bool = False):
+     global logs
+     if logs:
+         return
+
+     # Override output streams and log to buffer
+     logs = deque(maxlen=capacity)
+
+     global stdout_interceptor
+     global stderr_interceptor
+     stdout_interceptor = sys.stdout = LogInterceptor(sys.stdout)
+     stderr_interceptor = sys.stderr = LogInterceptor(sys.stderr)
+
+     # Setup default global logger
+     logger = logging.getLogger()
+     logger.setLevel(log_level)
+
+     stream_handler = logging.StreamHandler()
+     stream_handler.setFormatter(logging.Formatter("%(message)s"))
+
+     if use_stdout:
+         # Only errors and critical to stderr
+         stream_handler.addFilter(lambda record: record.levelno >= logging.ERROR)
+
+         # Lesser to stdout
+         stdout_handler = logging.StreamHandler(sys.stdout)
+         stdout_handler.setFormatter(logging.Formatter("%(message)s"))
+         stdout_handler.addFilter(lambda record: record.levelno < logging.ERROR)
+         logger.addHandler(stdout_handler)
+
+     logger.addHandler(stream_handler)
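
A usage sketch: once setup_logger() has swapped the interceptors into sys.stdout/sys.stderr, anything written to either stream lands in the ring buffer that get_logs() exposes.

    import app.logger

    app.logger.setup_logger(log_level="INFO", capacity=300)
    print("hello from stdout")              # captured by the interceptor
    for entry in app.logger.get_logs():     # deque of {"t": iso_time, "m": text}
        ...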
app/model_manager.py ADDED
@@ -0,0 +1,184 @@
+ from __future__ import annotations
+
+ import os
+ import base64
+ import json
+ import time
+ import logging
+ import folder_paths
+ import glob
+ import comfy.utils
+ from aiohttp import web
+ from PIL import Image
+ from io import BytesIO
+ from folder_paths import map_legacy, filter_files_extensions, filter_files_content_types
+
+
+ class ModelFileManager:
+     def __init__(self) -> None:
+         self.cache: dict[str, tuple[list[dict], dict[str, float], float]] = {}
+
+     def get_cache(self, key: str, default=None) -> tuple[list[dict], dict[str, float], float] | None:
+         return self.cache.get(key, default)
+
+     def set_cache(self, key: str, value: tuple[list[dict], dict[str, float], float]):
+         self.cache[key] = value
+
+     def clear_cache(self):
+         self.cache.clear()
+
+     def add_routes(self, routes):
+         # NOTE: This is an experiment to replace `/models`
+         @routes.get("/experiment/models")
+         async def get_model_folders(request):
+             model_types = list(folder_paths.folder_names_and_paths.keys())
+             folder_black_list = ["configs", "custom_nodes"]
+             output_folders: list[dict] = []
+             for folder in model_types:
+                 if folder in folder_black_list:
+                     continue
+                 output_folders.append({"name": folder, "folders": folder_paths.get_folder_paths(folder)})
+             return web.json_response(output_folders)
+
+         # NOTE: This is an experiment to replace `/models/{folder}`
+         @routes.get("/experiment/models/{folder}")
+         async def get_all_models(request):
+             folder = request.match_info.get("folder", None)
+             if folder not in folder_paths.folder_names_and_paths:
+                 return web.Response(status=404)
+             files = self.get_model_file_list(folder)
+             return web.json_response(files)
+
+         @routes.get("/experiment/models/preview/{folder}/{path_index}/{filename:.*}")
+         async def get_model_preview(request):
+             folder_name = request.match_info.get("folder", None)
+             path_index = int(request.match_info.get("path_index", None))
+             filename = request.match_info.get("filename", None)
+
+             if folder_name not in folder_paths.folder_names_and_paths:
+                 return web.Response(status=404)
+
+             folders = folder_paths.folder_names_and_paths[folder_name]
+             folder = folders[0][path_index]
+             full_filename = os.path.join(folder, filename)
+
+             previews = self.get_model_previews(full_filename)
+             default_preview = previews[0] if len(previews) > 0 else None
+             if default_preview is None or (isinstance(default_preview, str) and not os.path.isfile(default_preview)):
+                 return web.Response(status=404)
+
+             try:
+                 with Image.open(default_preview) as img:
+                     img_bytes = BytesIO()
+                     img.save(img_bytes, format="WEBP")
+                     img_bytes.seek(0)
+                     return web.Response(body=img_bytes.getvalue(), content_type="image/webp")
+             except:
+                 return web.Response(status=404)
+
+     def get_model_file_list(self, folder_name: str):
+         folder_name = map_legacy(folder_name)
+         folders = folder_paths.folder_names_and_paths[folder_name]
+         output_list: list[dict] = []
+
+         for index, folder in enumerate(folders[0]):
+             if not os.path.isdir(folder):
+                 continue
+             out = self.cache_model_file_list_(folder)
+             if out is None:
+                 out = self.recursive_search_models_(folder, index)
+                 self.set_cache(folder, out)
+             output_list.extend(out[0])
+
+         return output_list
+
+     def cache_model_file_list_(self, folder: str):
+         model_file_list_cache = self.get_cache(folder)
+
+         if model_file_list_cache is None:
+             return None
+         if not os.path.isdir(folder):
+             return None
+         if os.path.getmtime(folder) != model_file_list_cache[1]:
+             return None
+         for x in model_file_list_cache[1]:
+             time_modified = model_file_list_cache[1][x]
+             folder = x
+             if os.path.getmtime(folder) != time_modified:
+                 return None
+
+         return model_file_list_cache
+
+     def recursive_search_models_(self, directory: str, pathIndex: int) -> tuple[list[dict], dict[str, float], float]:
+         if not os.path.isdir(directory):
+             return [], {}, time.perf_counter()
+
+         excluded_dir_names = [".git"]
+         # TODO use settings
+         include_hidden_files = False
+
+         result: list[str] = []
+         dirs: dict[str, float] = {}
+
+         for dirpath, subdirs, filenames in os.walk(directory, followlinks=True, topdown=True):
+             subdirs[:] = [d for d in subdirs if d not in excluded_dir_names]
+             if not include_hidden_files:
+                 subdirs[:] = [d for d in subdirs if not d.startswith(".")]
+                 filenames = [f for f in filenames if not f.startswith(".")]
+
+             filenames = filter_files_extensions(filenames, folder_paths.supported_pt_extensions)
+
+             for file_name in filenames:
+                 try:
+                     relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory)
+                     result.append(relative_path)
+                 except:
+                     logging.warning(f"Warning: Unable to access {file_name}. Skipping this file.")
+                     continue
+
+             for d in subdirs:
+                 path: str = os.path.join(dirpath, d)
+                 try:
+                     dirs[path] = os.path.getmtime(path)
+                 except FileNotFoundError:
+                     logging.warning(f"Warning: Unable to access {path}. Skipping this path.")
+                     continue
+
+         return [{"name": f, "pathIndex": pathIndex} for f in result], dirs, time.perf_counter()
+
+     def get_model_previews(self, filepath: str) -> list[str | BytesIO]:
+         dirname = os.path.dirname(filepath)
+
+         if not os.path.exists(dirname):
+             return []
+
+         basename = os.path.splitext(filepath)[0]
+         match_files = glob.glob(f"{basename}.*", recursive=False)
+         image_files = filter_files_content_types(match_files, "image")
+         safetensors_file = next(filter(lambda x: x.endswith(".safetensors"), match_files), None)
+         safetensors_metadata = {}
+
+         result: list[str | BytesIO] = []
+
+         for filename in image_files:
+             _basename = os.path.splitext(filename)[0]
+             if _basename == basename:
+                 result.append(filename)
+             if _basename == f"{basename}.preview":
+                 result.append(filename)
+
+         if safetensors_file:
+             safetensors_filepath = os.path.join(dirname, safetensors_file)
+             header = comfy.utils.safetensors_header(safetensors_filepath, max_size=8*1024*1024)
+             if header:
+                 safetensors_metadata = json.loads(header)
+             safetensors_images = safetensors_metadata.get("__metadata__", {}).get("ssmd_cover_images", None)
+             if safetensors_images:
+                 safetensors_images = json.loads(safetensors_images)
+                 for image in safetensors_images:
+                     result.append(BytesIO(base64.b64decode(image)))
+
+         return result
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.clear_cache()
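
For reference, the cache entry layout restated as a sketch (the file names are invented): get_model_file_list() reuses an entry until a recorded directory mtime changes.

    from app.model_manager import ModelFileManager

    manager = ModelFileManager()
    files = manager.get_model_file_list("checkpoints")
    # Each cached value is a 3-tuple:
    #   ([{"name": "sd15.safetensors", "pathIndex": 0}, ...],  # files found
    #    {"/models/checkpoints/sub": 1700000000.0, ...},       # dir -> mtime
    #    123.45)                                               # perf_counter() stamp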
app/user_manager.py ADDED
@@ -0,0 +1,330 @@
+ from __future__ import annotations
+ import json
+ import os
+ import re
+ import uuid
+ import glob
+ import shutil
+ import logging
+ from aiohttp import web
+ from urllib import parse
+ from comfy.cli_args import args
+ import folder_paths
+ from .app_settings import AppSettings
+ from typing import TypedDict
+
+ default_user = "default"
+
+
+ class FileInfo(TypedDict):
+     path: str
+     size: int
+     modified: float
+
+
+ def get_file_info(path: str, relative_to: str) -> FileInfo:
+     return {
+         "path": os.path.relpath(path, relative_to).replace(os.sep, '/'),
+         "size": os.path.getsize(path),
+         "modified": os.path.getmtime(path)
+     }
+
+
+ class UserManager():
+     def __init__(self):
+         user_directory = folder_paths.get_user_directory()
+
+         self.settings = AppSettings(self)
+         if not os.path.exists(user_directory):
+             os.makedirs(user_directory, exist_ok=True)
+             if not args.multi_user:
+                 logging.warning("****** User settings have been changed to be stored on the server instead of browser storage. ******")
+                 logging.warning("****** For multi-user setups add the --multi-user CLI argument to enable multiple user profiles. ******")
+
+         if args.multi_user:
+             if os.path.isfile(self.get_users_file()):
+                 with open(self.get_users_file()) as f:
+                     self.users = json.load(f)
+             else:
+                 self.users = {}
+         else:
+             self.users = {"default": "default"}
+
+     def get_users_file(self):
+         return os.path.join(folder_paths.get_user_directory(), "users.json")
+
+     def get_request_user_id(self, request):
+         user = "default"
+         if args.multi_user and "comfy-user" in request.headers:
+             user = request.headers["comfy-user"]
+
+         if user not in self.users:
+             raise KeyError("Unknown user: " + user)
+
+         return user
+
+     def get_request_user_filepath(self, request, file, type="userdata", create_dir=True):
+         user_directory = folder_paths.get_user_directory()
+
+         if type == "userdata":
+             root_dir = user_directory
+         else:
+             raise KeyError("Unknown filepath type:" + type)
+
+         user = self.get_request_user_id(request)
+         path = user_root = os.path.abspath(os.path.join(root_dir, user))
+
+         # prevent leaving /{type}
+         if os.path.commonpath((root_dir, user_root)) != root_dir:
+             return None
+
+         if file is not None:
+             # Check if filename is url encoded
+             if "%" in file:
+                 file = parse.unquote(file)
+
+             # prevent leaving /{type}/{user}
+             path = os.path.abspath(os.path.join(user_root, file))
+             if os.path.commonpath((user_root, path)) != user_root:
+                 return None
+
+         parent = os.path.split(path)[0]
+
+         if create_dir and not os.path.exists(parent):
+             os.makedirs(parent, exist_ok=True)
+
+         return path
+
+     def add_user(self, name):
+         name = name.strip()
+         if not name:
+             raise ValueError("username not provided")
+         user_id = re.sub("[^a-zA-Z0-9-_]+", '-', name)
+         user_id = user_id + "_" + str(uuid.uuid4())
+
+         self.users[user_id] = name
+
+         with open(self.get_users_file(), "w") as f:
+             json.dump(self.users, f)
+
+         return user_id
+
+     def add_routes(self, routes):
+         self.settings.add_routes(routes)
+
+         @routes.get("/users")
+         async def get_users(request):
+             if args.multi_user:
+                 return web.json_response({"storage": "server", "users": self.users})
+             else:
+                 user_dir = self.get_request_user_filepath(request, None, create_dir=False)
+                 return web.json_response({
+                     "storage": "server",
+                     "migrated": os.path.exists(user_dir)
+                 })
+
+         @routes.post("/users")
+         async def post_users(request):
+             body = await request.json()
+             username = body["username"]
+             if username in self.users.values():
+                 return web.json_response({"error": "Duplicate username."}, status=400)
+
+             user_id = self.add_user(username)
+             return web.json_response(user_id)
+
+         @routes.get("/userdata")
+         async def listuserdata(request):
+             """
+             List user data files in a specified directory.
+
+             This endpoint allows listing files in a user's data directory, with options for recursion,
+             full file information, and path splitting.
+
+             Query Parameters:
+             - dir (required): The directory to list files from.
+             - recurse (optional): If "true", recursively list files in subdirectories.
+             - full_info (optional): If "true", return detailed file information (path, size, modified time).
+             - split (optional): If "true", split file paths into components (only applies when full_info is false).
+
+             Returns:
+             - 400: If 'dir' parameter is missing.
+             - 403: If the requested path is not allowed.
+             - 404: If the requested directory does not exist.
+             - 200: JSON response with the list of files or file information.
+
+             The response format depends on the query parameters:
+             - Default: List of relative file paths.
+             - full_info=true: List of dictionaries with file details.
+             - split=true (and full_info=false): List of lists, each containing path components.
+             """
+             directory = request.rel_url.query.get('dir', '')
+             if not directory:
+                 return web.Response(status=400, text="Directory not provided")
+
+             path = self.get_request_user_filepath(request, directory)
+             if not path:
+                 return web.Response(status=403, text="Invalid directory")
+
+             if not os.path.exists(path):
+                 return web.Response(status=404, text="Directory not found")
+
+             recurse = request.rel_url.query.get('recurse', '').lower() == "true"
+             full_info = request.rel_url.query.get('full_info', '').lower() == "true"
+             split_path = request.rel_url.query.get('split', '').lower() == "true"
+
+             # Use different patterns based on whether we're recursing or not
+             if recurse:
+                 pattern = os.path.join(glob.escape(path), '**', '*')
+             else:
+                 pattern = os.path.join(glob.escape(path), '*')
+
+             def process_full_path(full_path: str) -> FileInfo | str | list[str]:
+                 if full_info:
+                     return get_file_info(full_path, path)
+
+                 rel_path = os.path.relpath(full_path, path).replace(os.sep, '/')
+                 if split_path:
+                     return [rel_path] + rel_path.split('/')
+
+                 return rel_path
+
+             results = [
+                 process_full_path(full_path)
+                 for full_path in glob.glob(pattern, recursive=recurse)
+                 if os.path.isfile(full_path)
+             ]
+
+             return web.json_response(results)
+
+         def get_user_data_path(request, check_exists=False, param="file"):
+             file = request.match_info.get(param, None)
+             if not file:
+                 return web.Response(status=400)
+
+             path = self.get_request_user_filepath(request, file)
+             if not path:
+                 return web.Response(status=403)
+
+             if check_exists and not os.path.exists(path):
+                 return web.Response(status=404)
+
+             return path
+
+         @routes.get("/userdata/{file}")
+         async def getuserdata(request):
+             path = get_user_data_path(request, check_exists=True)
+             if not isinstance(path, str):
+                 return path
+
+             return web.FileResponse(path)
+
+         @routes.post("/userdata/{file}")
+         async def post_userdata(request):
+             """
+             Upload or update a user data file.
+
+             This endpoint handles file uploads to a user's data directory, with options for
+             controlling overwrite behavior and response format.
+
+             Query Parameters:
+             - overwrite (optional): If "false", prevents overwriting existing files. Defaults to "true".
+             - full_info (optional): If "true", returns detailed file information (path, size, modified time).
+               If "false", returns only the relative file path.
+
+             Path Parameters:
+             - file: The target file path (URL encoded if necessary).
+
+             Returns:
+             - 400: If 'file' parameter is missing.
+             - 403: If the requested path is not allowed.
+             - 409: If overwrite=false and the file already exists.
+             - 200: JSON response with either:
+               - Full file information (if full_info=true)
+               - Relative file path (if full_info=false)
+
+             The request body should contain the raw file content to be written.
+             """
+             path = get_user_data_path(request)
+             if not isinstance(path, str):
+                 return path
+
+             overwrite = request.query.get("overwrite", 'true') != "false"
+             full_info = request.query.get('full_info', 'false').lower() == "true"
+
+             if not overwrite and os.path.exists(path):
+                 return web.Response(status=409, text="File already exists")
+
+             body = await request.read()
+
+             with open(path, "wb") as f:
+                 f.write(body)
+
+             user_path = self.get_request_user_filepath(request, None)
+             if full_info:
+                 resp = get_file_info(path, user_path)
+             else:
+                 resp = os.path.relpath(path, user_path)
+
+             return web.json_response(resp)
+
+         @routes.delete("/userdata/{file}")
+         async def delete_userdata(request):
+             path = get_user_data_path(request, check_exists=True)
+             if not isinstance(path, str):
+                 return path
+
+             os.remove(path)
+
+             return web.Response(status=204)
+
+         @routes.post("/userdata/{file}/move/{dest}")
+         async def move_userdata(request):
+             """
+             Move or rename a user data file.
+
+             This endpoint handles moving or renaming files within a user's data directory, with options for
+             controlling overwrite behavior and response format.
+
+             Path Parameters:
+             - file: The source file path (URL encoded if necessary)
+             - dest: The destination file path (URL encoded if necessary)
+
+             Query Parameters:
+             - overwrite (optional): If "false", prevents overwriting existing files. Defaults to "true".
+             - full_info (optional): If "true", returns detailed file information (path, size, modified time).
+               If "false", returns only the relative file path.
+
+             Returns:
+             - 400: If either 'file' or 'dest' parameter is missing
+             - 403: If either requested path is not allowed
+             - 404: If the source file does not exist
+             - 409: If overwrite=false and the destination file already exists
+             - 200: JSON response with either:
+               - Full file information (if full_info=true)
+               - Relative file path (if full_info=false)
+             """
+             source = get_user_data_path(request, check_exists=True)
+             if not isinstance(source, str):
+                 return source
+
+             dest = get_user_data_path(request, check_exists=False, param="dest")
+             if not isinstance(dest, str):
+                 return dest
+
+             overwrite = request.query.get("overwrite", 'true') != "false"
+             full_info = request.query.get('full_info', 'false').lower() == "true"
+
+             if not overwrite and os.path.exists(dest):
+                 return web.Response(status=409, text="File already exists")
+
+             logging.info(f"moving '{source}' -> '{dest}'")
+             shutil.move(source, dest)
+
+             user_path = self.get_request_user_filepath(request, None)
+             if full_info:
+                 resp = get_file_info(dest, user_path)
+             else:
+                 resp = os.path.relpath(dest, user_path)
+
+             return web.json_response(resp)
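
A client-side sketch of the userdata API (default port and a made-up file name); the comfy-user header only selects a profile when the server runs with --multi-user:

    import requests

    BASE = "http://127.0.0.1:8188"
    headers = {"comfy-user": "default"}   # ignored unless --multi-user is set
    # Upload (or overwrite) a file in the user's directory...
    requests.post(f"{BASE}/userdata/notes.txt", data=b"hello", headers=headers)
    # ...then list it back with full metadata.
    print(requests.get(f"{BASE}/userdata", params={"dir": ".", "full_info": "true"},
                       headers=headers).json())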
bottle.py ADDED
@@ -0,0 +1,233 @@
+ import os
+ import random
+ import sys
+ from typing import Sequence, Mapping, Any, Union
+ import torch
+ import gradio as gr
+ from huggingface_hub import hf_hub_download
+ import spaces
+ from comfy import model_management
+
+ hf_hub_download(repo_id="Comfy-Org/stable-diffusion-v1-5-archive", filename="v1-5-pruned-emaonly-fp16.safetensors", local_dir="models/checkpoints")
+
+ def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
+     """Returns the value at the given index of a sequence or mapping.
+
+     If the object is a sequence (like list or string), returns the value at the given index.
+     If the object is a mapping (like a dictionary), returns the value at the index-th key.
+
+     Some nodes return a dictionary; in those cases we look under the "result" key.
+
+     Args:
+         obj (Union[Sequence, Mapping]): The object to retrieve the value from.
+         index (int): The index of the value to retrieve.
+
+     Returns:
+         Any: The value at the given index.
+
+     Raises:
+         IndexError: If the index is out of bounds for the object and the object is not a mapping.
+     """
+     try:
+         return obj[index]
+     except KeyError:
+         return obj["result"][index]
+
+
+ def find_path(name: str, path: str = None) -> str:
+     """
+     Recursively looks at parent folders starting from the given path until it finds the given name.
+     Returns the path as a string if found, or None otherwise.
+     """
+     # If no path is given, use the current working directory
+     if path is None:
+         path = os.getcwd()
+
+     # Check if the current directory contains the name
+     if name in os.listdir(path):
+         path_name = os.path.join(path, name)
+         print(f"{name} found: {path_name}")
+         return path_name
+
+     # Get the parent directory
+     parent_directory = os.path.dirname(path)
+
+     # If the parent directory is the same as the current directory, we've reached the root and stop the search
+     if parent_directory == path:
+         return None
+
+     # Recursively call the function with the parent directory
+     return find_path(name, parent_directory)
+
+
+ def add_comfyui_directory_to_sys_path() -> None:
+     """
+     Add 'ComfyUI' to the sys.path
+     """
+     comfyui_path = find_path("ComfyUI")
+     if comfyui_path is not None and os.path.isdir(comfyui_path):
+         sys.path.append(comfyui_path)
+         print(f"'{comfyui_path}' added to sys.path")
+
+
+ def add_extra_model_paths() -> None:
+     """
+     Parse the optional extra_model_paths.yaml file and register the parsed model paths.
+     """
+     try:
+         from main import load_extra_path_config
+     except ImportError:
+         print(
+             "Could not import load_extra_path_config from main.py. Looking in utils.extra_config instead."
+         )
+         from utils.extra_config import load_extra_path_config
+
+     extra_model_paths = find_path("extra_model_paths.yaml")
+
+     if extra_model_paths is not None:
+         load_extra_path_config(extra_model_paths)
+     else:
+         print("Could not find the extra_model_paths config file.")
+
+
+ add_comfyui_directory_to_sys_path()
+ add_extra_model_paths()
+
+
+ def import_custom_nodes() -> None:
+     """Find all custom nodes in the custom_nodes folder and add those node objects to NODE_CLASS_MAPPINGS
+
+     This function sets up a new asyncio event loop, initializes the PromptServer,
+     creates a PromptQueue, and initializes the custom nodes.
+     """
+     import asyncio
+     import execution
+     from nodes import init_extra_nodes
+     import server
+
+     # Creating a new event loop and setting it as the default loop
+     loop = asyncio.new_event_loop()
+     asyncio.set_event_loop(loop)
+
+     # Creating an instance of PromptServer with the loop
+     server_instance = server.PromptServer(loop)
+     execution.PromptQueue(server_instance)
+
+     # Initializing custom nodes
+     init_extra_nodes()
+
+
+ from nodes import NODE_CLASS_MAPPINGS
+
+ checkpointloadersimple = NODE_CLASS_MAPPINGS["CheckpointLoaderSimple"]()
+ checkpointloadersimple_4 = checkpointloadersimple.load_checkpoint(
+     ckpt_name="v1-5-pruned-emaonly-fp16.safetensors"
+ )
+
+ # Add all the models that load a safetensors file
+ model_loaders = [checkpointloadersimple_4]
+
+ # Check which models are valid and how to best load them
+ valid_models = [
+     getattr(loader[0], 'patcher', loader[0])
+     for loader in model_loaders
+     if not isinstance(loader[0], dict) and not isinstance(getattr(loader[0], 'patcher', None), dict)
+ ]
+
+ # Finally load the models
+ model_management.load_models_gpu(valid_models)
+
+ @spaces.GPU(duration=60)  # set the duration to the average time your workflow takes to run, in seconds
+ def generate_image(prompt):
+     import_custom_nodes()
+     with torch.inference_mode():
+         emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
+         emptylatentimage_5 = emptylatentimage.generate(
+             width=512, height=512, batch_size=1
+         )
+
+         cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
+         cliptextencode_6 = cliptextencode.encode(
+             text=prompt,  # e.g. "beautiful scenery nature glass bottle landscape, purple galaxy bottle"
+             clip=get_value_at_index(checkpointloadersimple_4, 1),
+         )
+
+         cliptextencode_7 = cliptextencode.encode(
+             text="text, watermark", clip=get_value_at_index(checkpointloadersimple_4, 1)
+         )
+
+         ksampler = NODE_CLASS_MAPPINGS["KSampler"]()
+         vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
+         saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
+
+         for q in range(1):
+             ksampler_3 = ksampler.sample(
+                 seed=random.randint(1, 2**64),
+                 steps=20,
+                 cfg=8,
+                 sampler_name="euler",
+                 scheduler="normal",
+                 denoise=1,
+                 model=get_value_at_index(checkpointloadersimple_4, 0),
+                 positive=get_value_at_index(cliptextencode_6, 0),
+                 negative=get_value_at_index(cliptextencode_7, 0),
+                 latent_image=get_value_at_index(emptylatentimage_5, 0),
+             )
+
+             vaedecode_8 = vaedecode.decode(
+                 samples=get_value_at_index(ksampler_3, 0),
+                 vae=get_value_at_index(checkpointloadersimple_4, 2),
+             )
+
+             saveimage_9 = saveimage.save_images(
+                 filename_prefix="ComfyUI", images=get_value_at_index(vaedecode_8, 0)
+             )
+
+         saved_path = f"output/{saveimage_9['ui']['images'][0]['filename']}"
+         return saved_path
+
+
+ if __name__ == "__main__":
+     # Start the Gradio app
+     with gr.Blocks() as app:
+         # Add a title
+         gr.Markdown("# Simple Example")
+
+         with gr.Row():
+             with gr.Column():
+                 # Add an input
+                 prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...")
+
+                 # The generate button
+                 generate_btn = gr.Button("Generate")
+
+             with gr.Column():
+                 # The output image
+                 output_image = gr.Image(label="Generated Image")
+
+         # Clicking the button triggers `generate_image` with the prompt as input
+         # and the generated image as output
+         generate_btn.click(
+             fn=generate_image,
+             inputs=[prompt_input],
+             outputs=[output_image]
+         )
+
+     app.launch(share=True)
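
get_value_at_index is the glue for both shapes ComfyUI nodes return, plain tuples and {"result": (...)} dicts; a tiny sketch with placeholder values:

    get_value_at_index(("model", "clip", "vae"), 1)         # -> "clip"
    get_value_at_index({"result": ("model", "clip")}, 0)    # -> "model"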
comfy/checkpoint_pickle.py ADDED
@@ -0,0 +1,13 @@
+ import pickle
+
+ load = pickle.load
+
+ class Empty:
+     pass
+
+ class Unpickler(pickle.Unpickler):
+     def find_class(self, module, name):
+         #TODO: safe unpickle
+         if module.startswith("pytorch_lightning"):
+             return Empty
+         return super().find_class(module, name)
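
The module mirrors the pickle interface (a load function plus an Unpickler class), so it can be handed to torch.load as a pickle_module; a sketch with a placeholder checkpoint path:

    import torch
    import comfy.checkpoint_pickle

    # pytorch_lightning objects embedded in old .ckpt files deserialize to the
    # harmless Empty class instead of importing pytorch_lightning itself.
    state = torch.load("some_model.ckpt",              # placeholder path
                       pickle_module=comfy.checkpoint_pickle,
                       weights_only=False)             # pickle_module implies full unpickling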
comfy/cldm/cldm.py ADDED
@@ -0,0 +1,433 @@
+ #taken from: https://github.com/lllyasviel/ControlNet
+ #and modified
+
+ import torch
+ import torch.nn as nn
+
+ from ..ldm.modules.diffusionmodules.util import (
+     timestep_embedding,
+ )
+
+ from ..ldm.modules.attention import SpatialTransformer
+ from ..ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample
+ from ..ldm.util import exists
+ from .control_types import UNION_CONTROLNET_TYPES
+ from collections import OrderedDict
+ import comfy.ops
+ from comfy.ldm.modules.attention import optimized_attention
+
+ class OptimizedAttention(nn.Module):
+     def __init__(self, c, nhead, dropout=0.0, dtype=None, device=None, operations=None):
+         super().__init__()
+         self.heads = nhead
+         self.c = c
+
+         self.in_proj = operations.Linear(c, c * 3, bias=True, dtype=dtype, device=device)
+         self.out_proj = operations.Linear(c, c, bias=True, dtype=dtype, device=device)
+
+     def forward(self, x):
+         x = self.in_proj(x)
+         q, k, v = x.split(self.c, dim=2)
+         out = optimized_attention(q, k, v, self.heads)
+         return self.out_proj(out)
+
+ class QuickGELU(nn.Module):
+     def forward(self, x: torch.Tensor):
+         return x * torch.sigmoid(1.702 * x)
+
+ class ResBlockUnionControlnet(nn.Module):
+     def __init__(self, dim, nhead, dtype=None, device=None, operations=None):
+         super().__init__()
+         self.attn = OptimizedAttention(dim, nhead, dtype=dtype, device=device, operations=operations)
+         self.ln_1 = operations.LayerNorm(dim, dtype=dtype, device=device)
+         self.mlp = nn.Sequential(
+             OrderedDict([("c_fc", operations.Linear(dim, dim * 4, dtype=dtype, device=device)), ("gelu", QuickGELU()),
+                          ("c_proj", operations.Linear(dim * 4, dim, dtype=dtype, device=device))]))
+         self.ln_2 = operations.LayerNorm(dim, dtype=dtype, device=device)
+
+     def attention(self, x: torch.Tensor):
+         return self.attn(x)
+
+     def forward(self, x: torch.Tensor):
+         x = x + self.attention(self.ln_1(x))
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+ class ControlledUnetModel(UNetModel):
+     #implemented in the ldm unet
+     pass
+
+ class ControlNet(nn.Module):
+     def __init__(
+         self,
+         image_size,
+         in_channels,
+         model_channels,
+         hint_channels,
+         num_res_blocks,
+         dropout=0,
+         channel_mult=(1, 2, 4, 8),
+         conv_resample=True,
+         dims=2,
+         num_classes=None,
+         use_checkpoint=False,
+         dtype=torch.float32,
+         num_heads=-1,
+         num_head_channels=-1,
+         num_heads_upsample=-1,
+         use_scale_shift_norm=False,
+         resblock_updown=False,
+         use_new_attention_order=False,
+         use_spatial_transformer=False,    # custom transformer support
+         transformer_depth=1,              # custom transformer support
+         context_dim=None,                 # custom transformer support
+         n_embed=None,                     # custom support for prediction of discrete ids into codebook of first stage vq model
+         legacy=True,
+         disable_self_attentions=None,
+         num_attention_blocks=None,
+         disable_middle_self_attn=False,
+         use_linear_in_transformer=False,
+         adm_in_channels=None,
+         transformer_depth_middle=None,
+         transformer_depth_output=None,
+         attn_precision=None,
+         union_controlnet_num_control_type=None,
+         device=None,
+         operations=comfy.ops.disable_weight_init,
+         **kwargs,
+     ):
+         super().__init__()
+         assert use_spatial_transformer == True, "use_spatial_transformer has to be true"
+         if use_spatial_transformer:
+             assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
+
+         if context_dim is not None:
+             assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
+             # from omegaconf.listconfig import ListConfig
+             # if type(context_dim) == ListConfig:
+             #     context_dim = list(context_dim)
+
+         if num_heads_upsample == -1:
+             num_heads_upsample = num_heads
+
+         if num_heads == -1:
+             assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
+
+         if num_head_channels == -1:
+             assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
+
+         self.dims = dims
+         self.image_size = image_size
+         self.in_channels = in_channels
+         self.model_channels = model_channels
+
+         if isinstance(num_res_blocks, int):
+             self.num_res_blocks = len(channel_mult) * [num_res_blocks]
+         else:
+             if len(num_res_blocks) != len(channel_mult):
+                 raise ValueError("provide num_res_blocks either as an int (globally constant) or "
+                                  "as a list/tuple (per-level) with the same length as channel_mult")
+             self.num_res_blocks = num_res_blocks
+
+         if disable_self_attentions is not None:
+             # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
+             assert len(disable_self_attentions) == len(channel_mult)
+         if num_attention_blocks is not None:
+             assert len(num_attention_blocks) == len(self.num_res_blocks)
+             assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
+
+         transformer_depth = transformer_depth[:]
+
+         self.dropout = dropout
+         self.channel_mult = channel_mult
+         self.conv_resample = conv_resample
+         self.num_classes = num_classes
+         self.use_checkpoint = use_checkpoint
+         self.dtype = dtype
+         self.num_heads = num_heads
+         self.num_head_channels = num_head_channels
+         self.num_heads_upsample = num_heads_upsample
+         self.predict_codebook_ids = n_embed is not None
+
+         time_embed_dim = model_channels * 4
+         self.time_embed = nn.Sequential(
+             operations.Linear(model_channels, time_embed_dim, dtype=self.dtype, device=device),
+             nn.SiLU(),
+             operations.Linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device),
+         )
+
+         if self.num_classes is not None:
+             if isinstance(self.num_classes, int):
+                 self.label_emb = nn.Embedding(num_classes, time_embed_dim)
+             elif self.num_classes == "continuous":
+                 self.label_emb = nn.Linear(1, time_embed_dim)
+             elif self.num_classes == "sequential":
+                 assert adm_in_channels is not None
+                 self.label_emb = nn.Sequential(
+                     nn.Sequential(
+                         operations.Linear(adm_in_channels, time_embed_dim, dtype=self.dtype, device=device),
+                         nn.SiLU(),
+                         operations.Linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device),
+                     )
+                 )
+             else:
+                 raise ValueError()
+
+         self.input_blocks = nn.ModuleList(
+             [
+                 TimestepEmbedSequential(
+                     operations.conv_nd(dims, in_channels, model_channels, 3, padding=1, dtype=self.dtype, device=device)
+                 )
+             ]
+         )
+         self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels, operations=operations, dtype=self.dtype, device=device)])
+
+         self.input_hint_block = TimestepEmbedSequential(
+             operations.conv_nd(dims, hint_channels, 16, 3, padding=1, dtype=self.dtype, device=device),
+             nn.SiLU(),
+             operations.conv_nd(dims, 16, 16, 3, padding=1, dtype=self.dtype, device=device),
+             nn.SiLU(),
+             operations.conv_nd(dims, 16, 32, 3, padding=1, stride=2, dtype=self.dtype, device=device),
+             nn.SiLU(),
+             operations.conv_nd(dims, 32, 32, 3, padding=1, dtype=self.dtype, device=device),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #taken from: https://github.com/lllyasviel/ControlNet
2
+ #and modified
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+
7
+ from ..ldm.modules.diffusionmodules.util import (
8
+ timestep_embedding,
9
+ )
10
+
11
+ from ..ldm.modules.attention import SpatialTransformer
12
+ from ..ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample
13
+ from ..ldm.util import exists
14
+ from .control_types import UNION_CONTROLNET_TYPES
15
+ from collections import OrderedDict
16
+ import comfy.ops
17
+ from comfy.ldm.modules.attention import optimized_attention
18
+
19
+ class OptimizedAttention(nn.Module):
20
+ def __init__(self, c, nhead, dropout=0.0, dtype=None, device=None, operations=None):
21
+ super().__init__()
22
+ self.heads = nhead
23
+ self.c = c
24
+
25
+ self.in_proj = operations.Linear(c, c * 3, bias=True, dtype=dtype, device=device)
26
+ self.out_proj = operations.Linear(c, c, bias=True, dtype=dtype, device=device)
27
+
28
+ def forward(self, x):
29
+ x = self.in_proj(x)
30
+ q, k, v = x.split(self.c, dim=2)
31
+ out = optimized_attention(q, k, v, self.heads)
32
+ return self.out_proj(out)
33
+
34
+ class QuickGELU(nn.Module):
35
+ def forward(self, x: torch.Tensor):
36
+ return x * torch.sigmoid(1.702 * x)
37
+
38
+ class ResBlockUnionControlnet(nn.Module):
39
+ def __init__(self, dim, nhead, dtype=None, device=None, operations=None):
40
+ super().__init__()
41
+ self.attn = OptimizedAttention(dim, nhead, dtype=dtype, device=device, operations=operations)
42
+ self.ln_1 = operations.LayerNorm(dim, dtype=dtype, device=device)
43
+ self.mlp = nn.Sequential(
44
+ OrderedDict([("c_fc", operations.Linear(dim, dim * 4, dtype=dtype, device=device)), ("gelu", QuickGELU()),
45
+ ("c_proj", operations.Linear(dim * 4, dim, dtype=dtype, device=device))]))
46
+ self.ln_2 = operations.LayerNorm(dim, dtype=dtype, device=device)
47
+
48
+ def attention(self, x: torch.Tensor):
49
+ return self.attn(x)
50
+
51
+ def forward(self, x: torch.Tensor):
52
+ x = x + self.attention(self.ln_1(x))
53
+ x = x + self.mlp(self.ln_2(x))
54
+ return x
55
+
56
+ class ControlledUnetModel(UNetModel):
57
+ #implemented in the ldm unet
58
+ pass
59
+
60
+ class ControlNet(nn.Module):
61
+ def __init__(
62
+ self,
63
+ image_size,
64
+ in_channels,
65
+ model_channels,
66
+ hint_channels,
67
+ num_res_blocks,
68
+ dropout=0,
69
+ channel_mult=(1, 2, 4, 8),
70
+ conv_resample=True,
71
+ dims=2,
72
+ num_classes=None,
73
+ use_checkpoint=False,
74
+ dtype=torch.float32,
75
+ num_heads=-1,
76
+ num_head_channels=-1,
77
+ num_heads_upsample=-1,
78
+ use_scale_shift_norm=False,
79
+ resblock_updown=False,
80
+ use_new_attention_order=False,
81
+ use_spatial_transformer=False, # custom transformer support
82
+ transformer_depth=1, # custom transformer support
83
+ context_dim=None, # custom transformer support
84
+ n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
85
+ legacy=True,
86
+ disable_self_attentions=None,
87
+ num_attention_blocks=None,
88
+ disable_middle_self_attn=False,
89
+ use_linear_in_transformer=False,
90
+ adm_in_channels=None,
91
+ transformer_depth_middle=None,
92
+ transformer_depth_output=None,
93
+ attn_precision=None,
94
+ union_controlnet_num_control_type=None,
95
+ device=None,
96
+ operations=comfy.ops.disable_weight_init,
97
+ **kwargs,
98
+ ):
99
+ super().__init__()
100
+ assert use_spatial_transformer == True, "use_spatial_transformer has to be true"
101
+ if use_spatial_transformer:
102
+ assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
103
+
104
+ if context_dim is not None:
105
+ assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
106
+ # from omegaconf.listconfig import ListConfig
107
+ # if type(context_dim) == ListConfig:
108
+ # context_dim = list(context_dim)
109
+
110
+ if num_heads_upsample == -1:
111
+ num_heads_upsample = num_heads
112
+
113
+ if num_heads == -1:
114
+ assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
115
+
116
+ if num_head_channels == -1:
117
+ assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
118
+
119
+ self.dims = dims
120
+ self.image_size = image_size
121
+ self.in_channels = in_channels
122
+ self.model_channels = model_channels
123
+
124
+ if isinstance(num_res_blocks, int):
125
+ self.num_res_blocks = len(channel_mult) * [num_res_blocks]
126
+ else:
127
+ if len(num_res_blocks) != len(channel_mult):
128
+ raise ValueError("provide num_res_blocks either as an int (globally constant) or "
129
+ "as a list/tuple (per-level) with the same length as channel_mult")
130
+ self.num_res_blocks = num_res_blocks
131
+
132
+ if disable_self_attentions is not None:
133
+ # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
134
+ assert len(disable_self_attentions) == len(channel_mult)
135
+ if num_attention_blocks is not None:
136
+ assert len(num_attention_blocks) == len(self.num_res_blocks)
137
+ assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
138
+
139
+ transformer_depth = transformer_depth[:]
140
+
141
+ self.dropout = dropout
142
+ self.channel_mult = channel_mult
143
+ self.conv_resample = conv_resample
144
+ self.num_classes = num_classes
145
+ self.use_checkpoint = use_checkpoint
146
+ self.dtype = dtype
147
+ self.num_heads = num_heads
148
+ self.num_head_channels = num_head_channels
149
+ self.num_heads_upsample = num_heads_upsample
150
+ self.predict_codebook_ids = n_embed is not None
151
+
152
+ time_embed_dim = model_channels * 4
153
+ self.time_embed = nn.Sequential(
154
+ operations.Linear(model_channels, time_embed_dim, dtype=self.dtype, device=device),
155
+ nn.SiLU(),
156
+ operations.Linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device),
157
+ )
158
+
159
+ if self.num_classes is not None:
160
+ if isinstance(self.num_classes, int):
161
+ self.label_emb = nn.Embedding(num_classes, time_embed_dim)
162
+ elif self.num_classes == "continuous":
163
+ self.label_emb = nn.Linear(1, time_embed_dim)
164
+ elif self.num_classes == "sequential":
165
+ assert adm_in_channels is not None
166
+ self.label_emb = nn.Sequential(
167
+ nn.Sequential(
168
+ operations.Linear(adm_in_channels, time_embed_dim, dtype=self.dtype, device=device),
169
+ nn.SiLU(),
170
+ operations.Linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device),
171
+ )
172
+ )
173
+ else:
174
+ raise ValueError()
175
+
176
+ self.input_blocks = nn.ModuleList(
177
+ [
178
+ TimestepEmbedSequential(
179
+ operations.conv_nd(dims, in_channels, model_channels, 3, padding=1, dtype=self.dtype, device=device)
180
+ )
181
+ ]
182
+ )
183
+ self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels, operations=operations, dtype=self.dtype, device=device)])
184
+
185
+ self.input_hint_block = TimestepEmbedSequential(
186
+ operations.conv_nd(dims, hint_channels, 16, 3, padding=1, dtype=self.dtype, device=device),
187
+ nn.SiLU(),
188
+ operations.conv_nd(dims, 16, 16, 3, padding=1, dtype=self.dtype, device=device),
189
+ nn.SiLU(),
190
+ operations.conv_nd(dims, 16, 32, 3, padding=1, stride=2, dtype=self.dtype, device=device),
191
+ nn.SiLU(),
192
+ operations.conv_nd(dims, 32, 32, 3, padding=1, dtype=self.dtype, device=device),
193
+ nn.SiLU(),
194
+ operations.conv_nd(dims, 32, 96, 3, padding=1, stride=2, dtype=self.dtype, device=device),
195
+ nn.SiLU(),
196
+ operations.conv_nd(dims, 96, 96, 3, padding=1, dtype=self.dtype, device=device),
197
+ nn.SiLU(),
198
+ operations.conv_nd(dims, 96, 256, 3, padding=1, stride=2, dtype=self.dtype, device=device),
199
+ nn.SiLU(),
200
+ operations.conv_nd(dims, 256, model_channels, 3, padding=1, dtype=self.dtype, device=device)
201
+ )
202
+
203
+ self._feature_size = model_channels
204
+ input_block_chans = [model_channels]
205
+ ch = model_channels
206
+ ds = 1
207
+ for level, mult in enumerate(channel_mult):
208
+ for nr in range(self.num_res_blocks[level]):
209
+ layers = [
210
+ ResBlock(
211
+ ch,
212
+ time_embed_dim,
213
+ dropout,
214
+ out_channels=mult * model_channels,
215
+ dims=dims,
216
+ use_checkpoint=use_checkpoint,
217
+ use_scale_shift_norm=use_scale_shift_norm,
218
+ dtype=self.dtype,
219
+ device=device,
220
+ operations=operations,
221
+ )
222
+ ]
223
+ ch = mult * model_channels
224
+ num_transformers = transformer_depth.pop(0)
225
+ if num_transformers > 0:
226
+ if num_head_channels == -1:
227
+ dim_head = ch // num_heads
228
+ else:
229
+ num_heads = ch // num_head_channels
230
+ dim_head = num_head_channels
231
+ if legacy:
232
+ #num_heads = 1
233
+ dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
234
+ if exists(disable_self_attentions):
235
+ disabled_sa = disable_self_attentions[level]
236
+ else:
237
+ disabled_sa = False
238
+
239
+ if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
240
+ layers.append(
241
+ SpatialTransformer(
242
+ ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim,
243
+ disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
244
+ use_checkpoint=use_checkpoint, attn_precision=attn_precision, dtype=self.dtype, device=device, operations=operations
245
+ )
246
+ )
247
+ self.input_blocks.append(TimestepEmbedSequential(*layers))
248
+ self.zero_convs.append(self.make_zero_conv(ch, operations=operations, dtype=self.dtype, device=device))
249
+ self._feature_size += ch
250
+ input_block_chans.append(ch)
251
+ if level != len(channel_mult) - 1:
252
+ out_ch = ch
253
+ self.input_blocks.append(
254
+ TimestepEmbedSequential(
255
+ ResBlock(
256
+ ch,
257
+ time_embed_dim,
258
+ dropout,
259
+ out_channels=out_ch,
260
+ dims=dims,
261
+ use_checkpoint=use_checkpoint,
262
+ use_scale_shift_norm=use_scale_shift_norm,
263
+ down=True,
264
+ dtype=self.dtype,
265
+ device=device,
266
+ operations=operations
267
+ )
268
+ if resblock_updown
269
+ else Downsample(
270
+ ch, conv_resample, dims=dims, out_channels=out_ch, dtype=self.dtype, device=device, operations=operations
271
+ )
272
+ )
273
+ )
274
+ ch = out_ch
275
+ input_block_chans.append(ch)
276
+ self.zero_convs.append(self.make_zero_conv(ch, operations=operations, dtype=self.dtype, device=device))
277
+ ds *= 2
278
+ self._feature_size += ch
279
+
280
+ if num_head_channels == -1:
281
+ dim_head = ch // num_heads
282
+ else:
283
+ num_heads = ch // num_head_channels
284
+ dim_head = num_head_channels
285
+ if legacy:
286
+ #num_heads = 1
287
+ dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
288
+ mid_block = [
289
+ ResBlock(
290
+ ch,
291
+ time_embed_dim,
292
+ dropout,
293
+ dims=dims,
294
+ use_checkpoint=use_checkpoint,
295
+ use_scale_shift_norm=use_scale_shift_norm,
296
+ dtype=self.dtype,
297
+ device=device,
298
+ operations=operations
299
+ )]
300
+ if transformer_depth_middle >= 0:
301
+ mid_block += [SpatialTransformer( # always uses a self-attn
302
+ ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim,
303
+ disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
304
+ use_checkpoint=use_checkpoint, attn_precision=attn_precision, dtype=self.dtype, device=device, operations=operations
305
+ ),
306
+ ResBlock(
307
+ ch,
308
+ time_embed_dim,
309
+ dropout,
310
+ dims=dims,
311
+ use_checkpoint=use_checkpoint,
312
+ use_scale_shift_norm=use_scale_shift_norm,
313
+ dtype=self.dtype,
314
+ device=device,
315
+ operations=operations
316
+ )]
317
+ self.middle_block = TimestepEmbedSequential(*mid_block)
318
+ self.middle_block_out = self.make_zero_conv(ch, operations=operations, dtype=self.dtype, device=device)
319
+ self._feature_size += ch
320
+
321
+ if union_controlnet_num_control_type is not None:
322
+ self.num_control_type = union_controlnet_num_control_type
323
+ num_trans_channel = 320
324
+ num_trans_head = 8
325
+ num_trans_layer = 1
326
+ num_proj_channel = 320
327
+ # task_scale_factor = num_trans_channel ** 0.5
328
+ self.task_embedding = nn.Parameter(torch.empty(self.num_control_type, num_trans_channel, dtype=self.dtype, device=device))
329
+
330
+ self.transformer_layes = nn.Sequential(*[ResBlockUnionControlnet(num_trans_channel, num_trans_head, dtype=self.dtype, device=device, operations=operations) for _ in range(num_trans_layer)]) # "layes" (sic): name kept to match union controlnet checkpoint keys
331
+ self.spatial_ch_projs = operations.Linear(num_trans_channel, num_proj_channel, dtype=self.dtype, device=device)
332
+ #-----------------------------------------------------------------------------------------------------
333
+
334
+ control_add_embed_dim = 256
335
+ class ControlAddEmbedding(nn.Module):
336
+ def __init__(self, in_dim, out_dim, num_control_type, dtype=None, device=None, operations=None):
337
+ super().__init__()
338
+ self.num_control_type = num_control_type
339
+ self.in_dim = in_dim
340
+ self.linear_1 = operations.Linear(in_dim * num_control_type, out_dim, dtype=dtype, device=device)
341
+ self.linear_2 = operations.Linear(out_dim, out_dim, dtype=dtype, device=device)
342
+ def forward(self, control_type, dtype, device):
343
+ c_type = torch.zeros((self.num_control_type,), device=device)
344
+ c_type[control_type] = 1.0
345
+ c_type = timestep_embedding(c_type.flatten(), self.in_dim, repeat_only=False).to(dtype).reshape((-1, self.num_control_type * self.in_dim))
346
+ return self.linear_2(torch.nn.functional.silu(self.linear_1(c_type)))
347
+
348
+ self.control_add_embedding = ControlAddEmbedding(control_add_embed_dim, time_embed_dim, self.num_control_type, dtype=self.dtype, device=device, operations=operations)
349
+ else:
350
+ self.task_embedding = None
351
+ self.control_add_embedding = None
352
+
353
+ def union_controlnet_merge(self, hint, control_type, emb, context):
354
+ # Equivalent to: https://github.com/xinsir6/ControlNetPlus/tree/main
355
+ inputs = []
356
+ condition_list = []
357
+
358
+ for idx in range(min(1, len(control_type))):
359
+ controlnet_cond = self.input_hint_block(hint[idx], emb, context)
360
+ feat_seq = torch.mean(controlnet_cond, dim=(2, 3))
361
+ if idx < len(control_type):
362
+ feat_seq += self.task_embedding[control_type[idx]].to(dtype=feat_seq.dtype, device=feat_seq.device)
363
+
364
+ inputs.append(feat_seq.unsqueeze(1))
365
+ condition_list.append(controlnet_cond)
366
+
367
+ x = torch.cat(inputs, dim=1)
368
+ x = self.transformer_layes(x)
369
+ controlnet_cond_fuser = None
370
+ for idx in range(len(control_type)):
371
+ alpha = self.spatial_ch_projs(x[:, idx])
372
+ alpha = alpha.unsqueeze(-1).unsqueeze(-1)
373
+ o = condition_list[idx] + alpha
374
+ if controlnet_cond_fuser is None:
375
+ controlnet_cond_fuser = o
376
+ else:
377
+ controlnet_cond_fuser += o
378
+ return controlnet_cond_fuser
379
+
380
+ def make_zero_conv(self, channels, operations=None, dtype=None, device=None):
381
+ return TimestepEmbedSequential(operations.conv_nd(self.dims, channels, channels, 1, padding=0, dtype=dtype, device=device))
382
+
383
+ def forward(self, x, hint, timesteps, context, y=None, **kwargs):
384
+ t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(x.dtype)
385
+ emb = self.time_embed(t_emb)
386
+
387
+ guided_hint = None
388
+ if self.control_add_embedding is not None: #Union Controlnet
389
+ control_type = kwargs.get("control_type", [])
390
+
391
+ if any([c >= self.num_control_type for c in control_type]):
392
+ max_type = max(control_type)
393
+ max_type_name = {
394
+ v: k for k, v in UNION_CONTROLNET_TYPES.items()
395
+ }[max_type]
396
+ raise ValueError(
397
+ f"Control type {max_type_name}({max_type}) is out of range for the number of control types" +
398
+ f"({self.num_control_type}) supported.\n" +
399
+ "Please consider using the ProMax ControlNet Union model.\n" +
400
+ "https://huggingface.co/xinsir/controlnet-union-sdxl-1.0/tree/main"
401
+ )
402
+
403
+ emb += self.control_add_embedding(control_type, emb.dtype, emb.device)
404
+ if len(control_type) > 0:
405
+ if len(hint.shape) < 5:
406
+ hint = hint.unsqueeze(dim=0)
407
+ guided_hint = self.union_controlnet_merge(hint, control_type, emb, context)
408
+
409
+ if guided_hint is None:
410
+ guided_hint = self.input_hint_block(hint, emb, context)
411
+
412
+ out_output = []
413
+ out_middle = []
414
+
415
+ if self.num_classes is not None:
416
+ assert y.shape[0] == x.shape[0]
417
+ emb = emb + self.label_emb(y)
418
+
419
+ h = x
420
+ for module, zero_conv in zip(self.input_blocks, self.zero_convs):
421
+ if guided_hint is not None:
422
+ h = module(h, emb, context)
423
+ h += guided_hint
424
+ guided_hint = None
425
+ else:
426
+ h = module(h, emb, context)
427
+ out_output.append(zero_conv(h, emb, context))
428
+
429
+ h = self.middle_block(h, emb, context)
430
+ out_middle.append(self.middle_block_out(h, emb, context))
431
+
432
+ return {"middle": out_middle, "output": out_output}
433
+
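Every residual this `ControlNet` emits passes through `make_zero_conv`. In the original ControlNet recipe those 1x1 convs are zero-initialized, so the control branch contributes nothing to the frozen UNet until training moves the weights. A minimal sketch of that property (illustrative only; here the weights are zeroed by hand rather than loaded from a checkpoint, and the 320-channel size is just an example):

```python
import torch
import torch.nn as nn

# A 1x1 "zero conv": with zeroed parameters its output is identically zero,
# so adding it to a UNet feature map initially changes nothing.
zero_conv = nn.Conv2d(320, 320, kernel_size=1, padding=0)
nn.init.zeros_(zero_conv.weight)
nn.init.zeros_(zero_conv.bias)

h = torch.randn(1, 320, 64, 64)      # stand-in UNet feature map
assert torch.all(zero_conv(h) == 0)  # the control signal starts at zero
```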
comfy/cldm/control_types.py ADDED
@@ -0,0 +1,10 @@
1
+ UNION_CONTROLNET_TYPES = {
2
+ "openpose": 0,
3
+ "depth": 1,
4
+ "hed/pidi/scribble/ted": 2,
5
+ "canny/lineart/anime_lineart/mlsd": 3,
6
+ "normal": 4,
7
+ "segment": 5,
8
+ "tile": 6,
9
+ "repaint": 7,
10
+ }
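`ControlNet.forward` in cldm.py above inverts this mapping to turn an out-of-range index back into a readable mode name for its error message. A small sketch of that reverse lookup (dict abridged for brevity):

```python
# Abridged copy of the mapping above, for illustration only.
UNION_CONTROLNET_TYPES = {"openpose": 0, "depth": 1, "tile": 6, "repaint": 7}

id_to_name = {v: k for k, v in UNION_CONTROLNET_TYPES.items()}
print(id_to_name[7])  # -> "repaint"
```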
comfy/cldm/dit_embedder.py ADDED
@@ -0,0 +1,120 @@
1
+ import math
2
+ from typing import List, Optional, Tuple
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch import Tensor
7
+
8
+ from comfy.ldm.modules.diffusionmodules.mmdit import DismantledBlock, PatchEmbed, VectorEmbedder, TimestepEmbedder, get_2d_sincos_pos_embed_torch
9
+
10
+
11
+ class ControlNetEmbedder(nn.Module):
12
+
13
+ def __init__(
14
+ self,
15
+ img_size: int,
16
+ patch_size: int,
17
+ in_chans: int,
18
+ attention_head_dim: int,
19
+ num_attention_heads: int,
20
+ adm_in_channels: int,
21
+ num_layers: int,
22
+ main_model_double: int,
23
+ double_y_emb: bool,
24
+ device: torch.device,
25
+ dtype: torch.dtype,
26
+ pos_embed_max_size: Optional[int] = None,
27
+ operations = None,
28
+ ):
29
+ super().__init__()
30
+ self.main_model_double = main_model_double
31
+ self.dtype = dtype
32
+ self.hidden_size = num_attention_heads * attention_head_dim
33
+ self.patch_size = patch_size
34
+ self.x_embedder = PatchEmbed(
35
+ img_size=img_size,
36
+ patch_size=patch_size,
37
+ in_chans=in_chans,
38
+ embed_dim=self.hidden_size,
39
+ strict_img_size=pos_embed_max_size is None,
40
+ device=device,
41
+ dtype=dtype,
42
+ operations=operations,
43
+ )
44
+
45
+ self.t_embedder = TimestepEmbedder(self.hidden_size, dtype=dtype, device=device, operations=operations)
46
+
47
+ self.double_y_emb = double_y_emb
48
+ if self.double_y_emb:
49
+ self.orig_y_embedder = VectorEmbedder(
50
+ adm_in_channels, self.hidden_size, dtype, device, operations=operations
51
+ )
52
+ self.y_embedder = VectorEmbedder(
53
+ self.hidden_size, self.hidden_size, dtype, device, operations=operations
54
+ )
55
+ else:
56
+ self.y_embedder = VectorEmbedder(
57
+ adm_in_channels, self.hidden_size, dtype, device, operations=operations
58
+ )
59
+
60
+ self.transformer_blocks = nn.ModuleList(
61
+ DismantledBlock(
62
+ hidden_size=self.hidden_size, num_heads=num_attention_heads, qkv_bias=True,
63
+ dtype=dtype, device=device, operations=operations
64
+ )
65
+ for _ in range(num_layers)
66
+ )
67
+
68
+ # self.use_y_embedder = pooled_projection_dim != self.time_text_embed.text_embedder.linear_1.in_features
69
+ # TODO double check this logic when 8b
70
+ self.use_y_embedder = True
71
+
72
+ self.controlnet_blocks = nn.ModuleList([])
73
+ for _ in range(len(self.transformer_blocks)):
74
+ controlnet_block = operations.Linear(self.hidden_size, self.hidden_size, dtype=dtype, device=device)
75
+ self.controlnet_blocks.append(controlnet_block)
76
+
77
+ self.pos_embed_input = PatchEmbed(
78
+ img_size=img_size,
79
+ patch_size=patch_size,
80
+ in_chans=in_chans,
81
+ embed_dim=self.hidden_size,
82
+ strict_img_size=False,
83
+ device=device,
84
+ dtype=dtype,
85
+ operations=operations,
86
+ )
87
+
88
+ def forward(
89
+ self,
90
+ x: torch.Tensor,
91
+ timesteps: torch.Tensor,
92
+ y: Optional[torch.Tensor] = None,
93
+ context: Optional[torch.Tensor] = None,
94
+ hint = None,
95
+ ) -> Tuple[Tensor, List[Tensor]]:
96
+ x_shape = list(x.shape)
97
+ x = self.x_embedder(x)
98
+ if not self.double_y_emb:
99
+ h = (x_shape[-2] + 1) // self.patch_size
100
+ w = (x_shape[-1] + 1) // self.patch_size
101
+ x += get_2d_sincos_pos_embed_torch(self.hidden_size, w, h, device=x.device)
102
+ c = self.t_embedder(timesteps, dtype=x.dtype)
103
+ if y is not None and self.y_embedder is not None:
104
+ if self.double_y_emb:
105
+ y = self.orig_y_embedder(y)
106
+ y = self.y_embedder(y)
107
+ c = c + y
108
+
109
+ x = x + self.pos_embed_input(hint)
110
+
111
+ block_out = ()
112
+
113
+ repeat = math.ceil(self.main_model_double / len(self.transformer_blocks))
114
+ for i in range(len(self.transformer_blocks)):
115
+ out = self.transformer_blocks[i](x, c)
116
+ if not self.double_y_emb:
117
+ x = out
118
+ block_out += (self.controlnet_blocks[i](out),) * repeat
119
+
120
+ return {"output": block_out}
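Because the embedder has fewer transformer blocks than the main MMDiT has double blocks, each controlnet output above is repeated `math.ceil(self.main_model_double / len(self.transformer_blocks))` times so the residual tuple lines up with the main model. A hedged sketch of that distribution with hypothetical block counts:

```python
import math

main_model_double = 24  # assumed: double blocks in the main model
num_blocks = 6          # assumed: transformer blocks in this embedder

repeat = math.ceil(main_model_double / num_blocks)  # 4

block_out = ()
for i in range(num_blocks):
    block_out += (f"ctrl_{i}",) * repeat  # each block's output serves `repeat` main blocks

print(len(block_out))  # 24: one residual per main-model double block
```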
comfy/cldm/mmdit.py ADDED
@@ -0,0 +1,81 @@
1
+ import torch
2
+ from typing import Optional
3
+ import comfy.ldm.modules.diffusionmodules.mmdit
4
+
5
+ class ControlNet(comfy.ldm.modules.diffusionmodules.mmdit.MMDiT):
6
+ def __init__(
7
+ self,
8
+ num_blocks = None,
9
+ control_latent_channels = None,
10
+ dtype = None,
11
+ device = None,
12
+ operations = None,
13
+ **kwargs,
14
+ ):
15
+ super().__init__(dtype=dtype, device=device, operations=operations, final_layer=False, num_blocks=num_blocks, **kwargs)
16
+ # controlnet_blocks
17
+ self.controlnet_blocks = torch.nn.ModuleList([])
18
+ for _ in range(len(self.joint_blocks)):
19
+ self.controlnet_blocks.append(operations.Linear(self.hidden_size, self.hidden_size, device=device, dtype=dtype))
20
+
21
+ if control_latent_channels is None:
22
+ control_latent_channels = self.in_channels
23
+
24
+ self.pos_embed_input = comfy.ldm.modules.diffusionmodules.mmdit.PatchEmbed(
25
+ None,
26
+ self.patch_size,
27
+ control_latent_channels,
28
+ self.hidden_size,
29
+ bias=True,
30
+ strict_img_size=False,
31
+ dtype=dtype,
32
+ device=device,
33
+ operations=operations
34
+ )
35
+
36
+ def forward(
37
+ self,
38
+ x: torch.Tensor,
39
+ timesteps: torch.Tensor,
40
+ y: Optional[torch.Tensor] = None,
41
+ context: Optional[torch.Tensor] = None,
42
+ hint = None,
43
+ ) -> torch.Tensor:
44
+
45
+ #weird sd3 controlnet specific stuff
46
+ y = torch.zeros_like(y)
47
+
48
+ if self.context_processor is not None:
49
+ context = self.context_processor(context)
50
+
51
+ hw = x.shape[-2:]
52
+ x = self.x_embedder(x) + self.cropped_pos_embed(hw, device=x.device).to(dtype=x.dtype, device=x.device)
53
+ x += self.pos_embed_input(hint)
54
+
55
+ c = self.t_embedder(timesteps, dtype=x.dtype)
56
+ if y is not None and self.y_embedder is not None:
57
+ y = self.y_embedder(y)
58
+ c = c + y
59
+
60
+ if context is not None:
61
+ context = self.context_embedder(context)
62
+
63
+ output = []
64
+
65
+ blocks = len(self.joint_blocks)
66
+ for i in range(blocks):
67
+ context, x = self.joint_blocks[i](
68
+ context,
69
+ x,
70
+ c=c,
71
+ use_checkpoint=self.use_checkpoint,
72
+ )
73
+
74
+ out = self.controlnet_blocks[i](x)
75
+ count = self.depth // blocks
76
+ if i == blocks - 1:
77
+ count -= 1
78
+ for j in range(count):
79
+ output.append(out)
80
+
81
+ return {"output": output}
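The loop above hands out each controlnet block's residual `depth // blocks` times, with one fewer copy for the final block. A sketch of the counting with hypothetical sizes (the values below are assumptions, not taken from any real SD3 config):

```python
# Hypothetical sizes, only to illustrate the output-count arithmetic above.
depth = 24    # assumed: joint blocks in the main MMDiT
blocks = 12   # assumed: joint blocks in this ControlNet

output = []
for i in range(blocks):
    count = depth // blocks
    if i == blocks - 1:
        count -= 1  # the last block contributes one fewer residual
    output.extend([f"block_{i}"] * count)

print(len(output))  # 23 == depth - 1 residuals when depth % blocks == 0
```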
comfy/cli_args.py ADDED
@@ -0,0 +1,190 @@
1
+ import argparse
2
+ import enum
3
+ import os
4
+ from typing import Optional
5
+ import comfy.options
6
+
7
+
8
+ class EnumAction(argparse.Action):
9
+ """
10
+ Argparse action for handling Enums
11
+ """
12
+ def __init__(self, **kwargs):
13
+ # Pop off the type value
14
+ enum_type = kwargs.pop("type", None)
15
+
16
+ # Ensure an Enum subclass is provided
17
+ if enum_type is None:
18
+ raise ValueError("type must be assigned an Enum when using EnumAction")
19
+ if not issubclass(enum_type, enum.Enum):
20
+ raise TypeError("type must be an Enum when using EnumAction")
21
+
22
+ # Generate choices from the Enum
23
+ choices = tuple(e.value for e in enum_type)
24
+ kwargs.setdefault("choices", choices)
25
+ kwargs.setdefault("metavar", f"[{','.join(list(choices))}]")
26
+
27
+ super(EnumAction, self).__init__(**kwargs)
28
+
29
+ self._enum = enum_type
30
+
31
+ def __call__(self, parser, namespace, values, option_string=None):
32
+ # Convert value back into an Enum
33
+ value = self._enum(values)
34
+ setattr(namespace, self.dest, value)
35
+
36
+
37
+ parser = argparse.ArgumentParser()
38
+
39
+ parser.add_argument("--listen", type=str, default="127.0.0.1", metavar="IP", nargs="?", const="0.0.0.0,::", help="Specify the IP address to listen on (default: 127.0.0.1). You can give a list of ip addresses by separating them with a comma like: 127.2.2.2,127.3.3.3 If --listen is provided without an argument, it defaults to 0.0.0.0,:: (listens on all ipv4 and ipv6)")
40
+ parser.add_argument("--port", type=int, default=8188, help="Set the listen port.")
41
+ parser.add_argument("--tls-keyfile", type=str, help="Path to TLS (SSL) key file. Enables TLS, makes app accessible at https://... requires --tls-certfile to function")
42
+ parser.add_argument("--tls-certfile", type=str, help="Path to TLS (SSL) certificate file. Enables TLS, makes app accessible at https://... requires --tls-keyfile to function")
43
+ parser.add_argument("--enable-cors-header", type=str, default=None, metavar="ORIGIN", nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.")
44
+ parser.add_argument("--max-upload-size", type=float, default=100, help="Set the maximum upload size in MB.")
45
+
46
+ parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+', action='append', help="Load one or more extra_model_paths.yaml files.")
47
+ parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
48
+ parser.add_argument("--temp-directory", type=str, default=None, help="Set the ComfyUI temp directory (default is in the ComfyUI directory).")
49
+ parser.add_argument("--input-directory", type=str, default=None, help="Set the ComfyUI input directory.")
50
+ parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.")
51
+ parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.")
52
+ parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
53
+ cm_group = parser.add_mutually_exclusive_group()
54
+ cm_group.add_argument("--cuda-malloc", action="store_true", help="Enable cudaMallocAsync (enabled by default for torch 2.0 and up).")
55
+ cm_group.add_argument("--disable-cuda-malloc", action="store_true", help="Disable cudaMallocAsync.")
56
+
57
+
58
+ fp_group = parser.add_mutually_exclusive_group()
59
+ fp_group.add_argument("--force-fp32", action="store_true", help="Force fp32 (If this makes your GPU work better please report it).")
60
+ fp_group.add_argument("--force-fp16", action="store_true", help="Force fp16.")
61
+
62
+ fpunet_group = parser.add_mutually_exclusive_group()
63
+ fpunet_group.add_argument("--fp32-unet", action="store_true", help="Run the diffusion model in fp32.")
64
+ fpunet_group.add_argument("--fp64-unet", action="store_true", help="Run the diffusion model in fp64.")
65
+ fpunet_group.add_argument("--bf16-unet", action="store_true", help="Run the diffusion model in bf16.")
66
+ fpunet_group.add_argument("--fp16-unet", action="store_true", help="Run the diffusion model in fp16")
67
+ fpunet_group.add_argument("--fp8_e4m3fn-unet", action="store_true", help="Store unet weights in fp8_e4m3fn.")
68
+ fpunet_group.add_argument("--fp8_e5m2-unet", action="store_true", help="Store unet weights in fp8_e5m2.")
69
+
70
+ fpvae_group = parser.add_mutually_exclusive_group()
71
+ fpvae_group.add_argument("--fp16-vae", action="store_true", help="Run the VAE in fp16, might cause black images.")
72
+ fpvae_group.add_argument("--fp32-vae", action="store_true", help="Run the VAE in full precision fp32.")
73
+ fpvae_group.add_argument("--bf16-vae", action="store_true", help="Run the VAE in bf16.")
74
+
75
+ parser.add_argument("--cpu-vae", action="store_true", help="Run the VAE on the CPU.")
76
+
77
+ fpte_group = parser.add_mutually_exclusive_group()
78
+ fpte_group.add_argument("--fp8_e4m3fn-text-enc", action="store_true", help="Store text encoder weights in fp8 (e4m3fn variant).")
79
+ fpte_group.add_argument("--fp8_e5m2-text-enc", action="store_true", help="Store text encoder weights in fp8 (e5m2 variant).")
80
+ fpte_group.add_argument("--fp16-text-enc", action="store_true", help="Store text encoder weights in fp16.")
81
+ fpte_group.add_argument("--fp32-text-enc", action="store_true", help="Store text encoder weights in fp32.")
82
+
83
+ parser.add_argument("--force-channels-last", action="store_true", help="Force channels last format when inferencing the models.")
84
+
85
+ parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.")
86
+
87
+ parser.add_argument("--oneapi-device-selector", type=str, default=None, metavar="SELECTOR_STRING", help="Sets the oneAPI device(s) this instance will use.")
88
+ parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize default when loading models with Intel's Extension for Pytorch.")
89
+
90
+ class LatentPreviewMethod(enum.Enum):
91
+ NoPreviews = "none"
92
+ Auto = "auto"
93
+ Latent2RGB = "latent2rgb"
94
+ TAESD = "taesd"
95
+
96
+ parser.add_argument("--preview-method", type=LatentPreviewMethod, default=LatentPreviewMethod.NoPreviews, help="Default preview method for sampler nodes.", action=EnumAction)
97
+
98
+ parser.add_argument("--preview-size", type=int, default=512, help="Sets the maximum preview size for sampler nodes.")
99
+
100
+ cache_group = parser.add_mutually_exclusive_group()
101
+ cache_group.add_argument("--cache-classic", action="store_true", help="Use the old style (aggressive) caching.")
102
+ cache_group.add_argument("--cache-lru", type=int, default=0, help="Use LRU caching with a maximum of N node results cached. May use more RAM/VRAM.")
103
+
104
+ attn_group = parser.add_mutually_exclusive_group()
105
+ attn_group.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization. Ignored when xformers is used.")
106
+ attn_group.add_argument("--use-quad-cross-attention", action="store_true", help="Use the sub-quadratic cross attention optimization . Ignored when xformers is used.")
107
+ attn_group.add_argument("--use-pytorch-cross-attention", action="store_true", help="Use the new pytorch 2.0 cross attention function.")
108
+ attn_group.add_argument("--use-sage-attention", action="store_true", help="Use sage attention.")
109
+
110
+ parser.add_argument("--disable-xformers", action="store_true", help="Disable xformers.")
111
+
112
+ upcast = parser.add_mutually_exclusive_group()
113
+ upcast.add_argument("--force-upcast-attention", action="store_true", help="Force enable attention upcasting, please report if it fixes black images.")
114
+ upcast.add_argument("--dont-upcast-attention", action="store_true", help="Disable all upcasting of attention. Should be unnecessary except for debugging.")
115
+
116
+
117
+ vram_group = parser.add_mutually_exclusive_group()
118
+ vram_group.add_argument("--gpu-only", action="store_true", help="Store and run everything (text encoders/CLIP models, etc... on the GPU).")
119
+ vram_group.add_argument("--highvram", action="store_true", help="By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.")
120
+ vram_group.add_argument("--normalvram", action="store_true", help="Used to force normal vram use if lowvram gets automatically enabled.")
121
+ vram_group.add_argument("--lowvram", action="store_true", help="Split the unet in parts to use less vram.")
122
+ vram_group.add_argument("--novram", action="store_true", help="When lowvram isn't enough.")
123
+ vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).")
124
+
125
+ parser.add_argument("--reserve-vram", type=float, default=None, help="Set the amount of vram in GB you want to reserve for use by your OS/other software. By default some amount is reserved depending on your OS.")
126
+
127
+
128
+ parser.add_argument("--default-hashing-function", type=str, choices=['md5', 'sha1', 'sha256', 'sha512'], default='sha256', help="Allows you to choose the hash function to use for duplicate filename / contents comparison. Default is sha256.")
129
+
130
+ parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to agressively offload to regular ram instead of keeping models in vram when it can.")
131
+ parser.add_argument("--deterministic", action="store_true", help="Make pytorch use slower deterministic algorithms when it can. Note that this might not make images deterministic in all cases.")
132
+ parser.add_argument("--fast", action="store_true", help="Enable some untested and potentially quality deteriorating optimizations.")
133
+
134
+ parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.")
135
+ parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.")
136
+ parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build: Enable convenient things that most people using the standalone windows build will probably enjoy (like auto opening the page on startup).")
137
+
138
+ parser.add_argument("--disable-metadata", action="store_true", help="Disable saving prompt metadata in files.")
139
+ parser.add_argument("--disable-all-custom-nodes", action="store_true", help="Disable loading all custom nodes.")
140
+
141
+ parser.add_argument("--multi-user", action="store_true", help="Enables per-user storage.")
142
+
143
+ parser.add_argument("--verbose", default='INFO', const='DEBUG', nargs="?", choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help='Set the logging level')
144
+ parser.add_argument("--log-stdout", action="store_true", help="Send normal process output to stdout instead of stderr (default).")
145
+
146
+ # The default built-in provider hosted under web/
147
+ DEFAULT_VERSION_STRING = "comfyanonymous/ComfyUI@latest"
148
+
149
+ parser.add_argument(
150
+ "--front-end-version",
151
+ type=str,
152
+ default=DEFAULT_VERSION_STRING,
153
+ help="""
154
+ Specifies the version of the frontend to be used. This command needs internet connectivity to query and
155
+ download available frontend implementations from GitHub releases.
156
+
157
+ The version string should be in the format of:
158
+ [repoOwner]/[repoName]@[version]
159
+ where version is one of: "latest" or a valid version number (e.g. "1.0.0")
160
+ """,
161
+ )
162
+
163
+ def is_valid_directory(path: Optional[str]) -> Optional[str]:
164
+ """Validate if the given path is a directory."""
165
+ if path is None:
166
+ return None
167
+
168
+ if not os.path.isdir(path):
169
+ raise argparse.ArgumentTypeError(f"{path} is not a valid directory.")
170
+ return path
171
+
172
+ parser.add_argument(
173
+ "--front-end-root",
174
+ type=is_valid_directory,
175
+ default=None,
176
+ help="The local filesystem path to the directory where the frontend is located. Overrides --front-end-version.",
177
+ )
178
+
179
+ parser.add_argument("--user-directory", type=is_valid_directory, default=None, help="Set the ComfyUI user directory with an absolute path.")
180
+
181
+ if comfy.options.args_parsing:
182
+ args = parser.parse_args()
183
+ else:
184
+ args = parser.parse_args([])
185
+
186
+ if args.windows_standalone_build:
187
+ args.auto_launch = True
188
+
189
+ if args.disable_auto_launch:
190
+ args.auto_launch = False
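The `EnumAction` defined at the top of this file is what lets options like `--preview-method` accept a plain string on the command line and land in `args` as an Enum member. A self-contained sketch of the same pattern (the `Color` enum and `DemoEnumAction` name are stand-ins, not part of the diff):

```python
import argparse
import enum

class Color(enum.Enum):  # stand-in enum; any value-backed Enum works
    RED = "red"
    BLUE = "blue"

class DemoEnumAction(argparse.Action):
    """Condensed restatement of the EnumAction pattern above."""
    def __init__(self, **kwargs):
        enum_type = kwargs.pop("type", None)                        # pop the Enum class
        kwargs.setdefault("choices", tuple(e.value for e in enum_type))
        super().__init__(**kwargs)
        self._enum = enum_type

    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self._enum(values))           # str -> Enum member

p = argparse.ArgumentParser()
p.add_argument("--color", type=Color, action=DemoEnumAction, default=Color.RED)
print(p.parse_args(["--color", "blue"]).color)  # Color.BLUE
```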
comfy/clip_config_bigg.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "architectures": [
3
+ "CLIPTextModel"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 0,
7
+ "dropout": 0.0,
8
+ "eos_token_id": 49407,
9
+ "hidden_act": "gelu",
10
+ "hidden_size": 1280,
11
+ "initializer_factor": 1.0,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 5120,
14
+ "layer_norm_eps": 1e-05,
15
+ "max_position_embeddings": 77,
16
+ "model_type": "clip_text_model",
17
+ "num_attention_heads": 20,
18
+ "num_hidden_layers": 32,
19
+ "pad_token_id": 1,
20
+ "projection_dim": 1280,
21
+ "torch_dtype": "float32",
22
+ "vocab_size": 49408
23
+ }
comfy/clip_model.py ADDED
@@ -0,0 +1,218 @@
1
+ import torch
2
+ from comfy.ldm.modules.attention import optimized_attention_for_device
3
+ import comfy.ops
4
+
5
+ class CLIPAttention(torch.nn.Module):
6
+ def __init__(self, embed_dim, heads, dtype, device, operations):
7
+ super().__init__()
8
+
9
+ self.heads = heads
10
+ self.q_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
11
+ self.k_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
12
+ self.v_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
13
+
14
+ self.out_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device)
15
+
16
+ def forward(self, x, mask=None, optimized_attention=None):
17
+ q = self.q_proj(x)
18
+ k = self.k_proj(x)
19
+ v = self.v_proj(x)
20
+
21
+ out = optimized_attention(q, k, v, self.heads, mask)
22
+ return self.out_proj(out)
23
+
24
+ ACTIVATIONS = {"quick_gelu": lambda a: a * torch.sigmoid(1.702 * a),
25
+ "gelu": torch.nn.functional.gelu,
26
+ "gelu_pytorch_tanh": lambda a: torch.nn.functional.gelu(a, approximate="tanh"),
27
+ }
28
+
29
+ class CLIPMLP(torch.nn.Module):
30
+ def __init__(self, embed_dim, intermediate_size, activation, dtype, device, operations):
31
+ super().__init__()
32
+ self.fc1 = operations.Linear(embed_dim, intermediate_size, bias=True, dtype=dtype, device=device)
33
+ self.activation = ACTIVATIONS[activation]
34
+ self.fc2 = operations.Linear(intermediate_size, embed_dim, bias=True, dtype=dtype, device=device)
35
+
36
+ def forward(self, x):
37
+ x = self.fc1(x)
38
+ x = self.activation(x)
39
+ x = self.fc2(x)
40
+ return x
41
+
42
+ class CLIPLayer(torch.nn.Module):
43
+ def __init__(self, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations):
44
+ super().__init__()
45
+ self.layer_norm1 = operations.LayerNorm(embed_dim, dtype=dtype, device=device)
46
+ self.self_attn = CLIPAttention(embed_dim, heads, dtype, device, operations)
47
+ self.layer_norm2 = operations.LayerNorm(embed_dim, dtype=dtype, device=device)
48
+ self.mlp = CLIPMLP(embed_dim, intermediate_size, intermediate_activation, dtype, device, operations)
49
+
50
+ def forward(self, x, mask=None, optimized_attention=None):
51
+ x += self.self_attn(self.layer_norm1(x), mask, optimized_attention)
52
+ x += self.mlp(self.layer_norm2(x))
53
+ return x
54
+
55
+
56
+ class CLIPEncoder(torch.nn.Module):
57
+ def __init__(self, num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations):
58
+ super().__init__()
59
+ self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations) for i in range(num_layers)])
60
+
61
+ def forward(self, x, mask=None, intermediate_output=None):
62
+ optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None, small_input=True)
63
+
64
+ if intermediate_output is not None:
65
+ if intermediate_output < 0:
66
+ intermediate_output = len(self.layers) + intermediate_output
67
+
68
+ intermediate = None
69
+ for i, l in enumerate(self.layers):
70
+ x = l(x, mask, optimized_attention)
71
+ if i == intermediate_output:
72
+ intermediate = x.clone()
73
+ return x, intermediate
74
+
75
+ class CLIPEmbeddings(torch.nn.Module):
76
+ def __init__(self, embed_dim, vocab_size=49408, num_positions=77, dtype=None, device=None, operations=None):
77
+ super().__init__()
78
+ self.token_embedding = operations.Embedding(vocab_size, embed_dim, dtype=dtype, device=device)
79
+ self.position_embedding = operations.Embedding(num_positions, embed_dim, dtype=dtype, device=device)
80
+
81
+ def forward(self, input_tokens, dtype=torch.float32):
82
+ return self.token_embedding(input_tokens, out_dtype=dtype) + comfy.ops.cast_to(self.position_embedding.weight, dtype=dtype, device=input_tokens.device)
83
+
84
+
85
+ class CLIPTextModel_(torch.nn.Module):
86
+ def __init__(self, config_dict, dtype, device, operations):
87
+ num_layers = config_dict["num_hidden_layers"]
88
+ embed_dim = config_dict["hidden_size"]
89
+ heads = config_dict["num_attention_heads"]
90
+ intermediate_size = config_dict["intermediate_size"]
91
+ intermediate_activation = config_dict["hidden_act"]
92
+ num_positions = config_dict["max_position_embeddings"]
93
+ self.eos_token_id = config_dict["eos_token_id"]
94
+
95
+ super().__init__()
96
+ self.embeddings = CLIPEmbeddings(embed_dim, num_positions=num_positions, dtype=dtype, device=device, operations=operations)
97
+ self.encoder = CLIPEncoder(num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations)
98
+ self.final_layer_norm = operations.LayerNorm(embed_dim, dtype=dtype, device=device)
99
+
100
+ def forward(self, input_tokens, attention_mask=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=torch.float32):
101
+ x = self.embeddings(input_tokens, dtype=dtype)
102
+ mask = None
103
+ if attention_mask is not None:
104
+ mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
105
+ mask = mask.masked_fill(mask.to(torch.bool), float("-inf"))
106
+
107
+ causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1)
108
+ if mask is not None:
109
+ mask += causal_mask
110
+ else:
111
+ mask = causal_mask
112
+
113
+ x, i = self.encoder(x, mask=mask, intermediate_output=intermediate_output)
114
+ x = self.final_layer_norm(x)
115
+ if i is not None and final_layer_norm_intermediate:
116
+ i = self.final_layer_norm(i)
117
+
118
+ pooled_output = x[torch.arange(x.shape[0], device=x.device), (torch.round(input_tokens).to(dtype=torch.int, device=x.device) == self.eos_token_id).int().argmax(dim=-1),]
119
+ return x, i, pooled_output
120
+
121
+ class CLIPTextModel(torch.nn.Module):
122
+ def __init__(self, config_dict, dtype, device, operations):
123
+ super().__init__()
124
+ self.num_layers = config_dict["num_hidden_layers"]
125
+ self.text_model = CLIPTextModel_(config_dict, dtype, device, operations)
126
+ embed_dim = config_dict["hidden_size"]
127
+ self.text_projection = operations.Linear(embed_dim, embed_dim, bias=False, dtype=dtype, device=device)
128
+ self.dtype = dtype
129
+
130
+ def get_input_embeddings(self):
131
+ return self.text_model.embeddings.token_embedding
132
+
133
+ def set_input_embeddings(self, embeddings):
134
+ self.text_model.embeddings.token_embedding = embeddings
135
+
136
+ def forward(self, *args, **kwargs):
137
+ x = self.text_model(*args, **kwargs)
138
+ out = self.text_projection(x[2])
139
+ return (x[0], x[1], out, x[2])
140
+
141
+
142
+ class CLIPVisionEmbeddings(torch.nn.Module):
143
+ def __init__(self, embed_dim, num_channels=3, patch_size=14, image_size=224, model_type="", dtype=None, device=None, operations=None):
144
+ super().__init__()
145
+
146
+ num_patches = (image_size // patch_size) ** 2
147
+ if model_type == "siglip_vision_model":
148
+ self.class_embedding = None
149
+ patch_bias = True
150
+ else:
151
+ num_patches = num_patches + 1
152
+ self.class_embedding = torch.nn.Parameter(torch.empty(embed_dim, dtype=dtype, device=device))
153
+ patch_bias = False
154
+
155
+ self.patch_embedding = operations.Conv2d(
156
+ in_channels=num_channels,
157
+ out_channels=embed_dim,
158
+ kernel_size=patch_size,
159
+ stride=patch_size,
160
+ bias=patch_bias,
161
+ dtype=dtype,
162
+ device=device
163
+ )
164
+
165
+ self.position_embedding = operations.Embedding(num_patches, embed_dim, dtype=dtype, device=device)
166
+
167
+ def forward(self, pixel_values):
168
+ embeds = self.patch_embedding(pixel_values).flatten(2).transpose(1, 2)
169
+ if self.class_embedding is not None:
170
+ embeds = torch.cat([comfy.ops.cast_to_input(self.class_embedding, embeds).expand(pixel_values.shape[0], 1, -1), embeds], dim=1)
171
+ return embeds + comfy.ops.cast_to_input(self.position_embedding.weight, embeds)
172
+
173
+
174
+ class CLIPVision(torch.nn.Module):
175
+ def __init__(self, config_dict, dtype, device, operations):
176
+ super().__init__()
177
+ num_layers = config_dict["num_hidden_layers"]
178
+ embed_dim = config_dict["hidden_size"]
179
+ heads = config_dict["num_attention_heads"]
180
+ intermediate_size = config_dict["intermediate_size"]
181
+ intermediate_activation = config_dict["hidden_act"]
182
+ model_type = config_dict["model_type"]
183
+
184
+ self.embeddings = CLIPVisionEmbeddings(embed_dim, config_dict["num_channels"], config_dict["patch_size"], config_dict["image_size"], model_type=model_type, dtype=dtype, device=device, operations=operations)
185
+ if model_type == "siglip_vision_model":
186
+ self.pre_layrnorm = lambda a: a # "layrnorm" (sic) matches the upstream CLIP checkpoint key name
187
+ self.output_layernorm = True
188
+ else:
189
+ self.pre_layrnorm = operations.LayerNorm(embed_dim)
190
+ self.output_layernorm = False
191
+ self.encoder = CLIPEncoder(num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations)
192
+ self.post_layernorm = operations.LayerNorm(embed_dim)
193
+
194
+ def forward(self, pixel_values, attention_mask=None, intermediate_output=None):
195
+ x = self.embeddings(pixel_values)
196
+ x = self.pre_layrnorm(x)
197
+ #TODO: attention_mask?
198
+ x, i = self.encoder(x, mask=None, intermediate_output=intermediate_output)
199
+ if self.output_layernorm:
200
+ x = self.post_layernorm(x)
201
+ pooled_output = x
202
+ else:
203
+ pooled_output = self.post_layernorm(x[:, 0, :])
204
+ return x, i, pooled_output
205
+
206
+ class CLIPVisionModelProjection(torch.nn.Module):
207
+ def __init__(self, config_dict, dtype, device, operations):
208
+ super().__init__()
209
+ self.vision_model = CLIPVision(config_dict, dtype, device, operations)
210
+ if "projection_dim" in config_dict:
211
+ self.visual_projection = operations.Linear(config_dict["hidden_size"], config_dict["projection_dim"], bias=False)
212
+ else:
213
+ self.visual_projection = lambda a: a
214
+
215
+ def forward(self, *args, **kwargs):
216
+ x = self.vision_model(*args, **kwargs)
217
+ out = self.visual_projection(x[2])
218
+ return (x[0], x[1], out)
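`CLIPTextModel_.forward` above builds its causal attention mask by filling a square matrix with `-inf` and keeping only the strict upper triangle via `triu_(1)`, so each token can attend to itself and earlier positions only. A small sketch of the resulting pattern for a length-4 sequence:

```python
import torch

seq_len = 4
# -inf above the diagonal blocks attention to future tokens; 0 elsewhere passes through.
causal_mask = torch.empty(seq_len, seq_len).fill_(float("-inf")).triu_(1)
print(causal_mask)
# tensor([[0., -inf, -inf, -inf],
#         [0.,   0., -inf, -inf],
#         [0.,   0.,   0., -inf],
#         [0.,   0.,   0.,   0.]])
```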
comfy/clip_vision.py ADDED
@@ -0,0 +1,129 @@
1
+ from .utils import load_torch_file, transformers_convert, state_dict_prefix_replace
2
+ import os
3
+ import torch
4
+ import json
5
+ import logging
6
+
7
+ import comfy.ops
8
+ import comfy.model_patcher
9
+ import comfy.model_management
10
+ import comfy.utils
11
+ import comfy.clip_model
12
+
13
+ class Output:
14
+ def __getitem__(self, key):
15
+ return getattr(self, key)
16
+ def __setitem__(self, key, item):
17
+ setattr(self, key, item)
18
+
19
+ def clip_preprocess(image, size=224, mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711], crop=True):
20
+ mean = torch.tensor(mean, device=image.device, dtype=image.dtype)
21
+ std = torch.tensor(std, device=image.device, dtype=image.dtype)
22
+ image = image.movedim(-1, 1)
23
+ if not (image.shape[2] == size and image.shape[3] == size):
24
+ if crop:
25
+ scale = (size / min(image.shape[2], image.shape[3]))
26
+ scale_size = (round(scale * image.shape[2]), round(scale * image.shape[3]))
27
+ else:
28
+ scale_size = (size, size)
29
+
30
+ image = torch.nn.functional.interpolate(image, size=scale_size, mode="bicubic", antialias=True)
31
+ h = (image.shape[2] - size)//2
32
+ w = (image.shape[3] - size)//2
33
+ image = image[:,:,h:h+size,w:w+size]
34
+ image = torch.clip((255. * image), 0, 255).round() / 255.0
35
+ return (image - mean.view([3,1,1])) / std.view([3,1,1])
36
+
37
+ class ClipVisionModel():
38
+ def __init__(self, json_config):
39
+ with open(json_config) as f:
40
+ config = json.load(f)
41
+
42
+ self.image_size = config.get("image_size", 224)
43
+ self.image_mean = config.get("image_mean", [0.48145466, 0.4578275, 0.40821073])
44
+ self.image_std = config.get("image_std", [0.26862954, 0.26130258, 0.27577711])
45
+ self.load_device = comfy.model_management.text_encoder_device()
46
+ offload_device = comfy.model_management.text_encoder_offload_device()
47
+ self.dtype = comfy.model_management.text_encoder_dtype(self.load_device)
48
+ self.model = comfy.clip_model.CLIPVisionModelProjection(config, self.dtype, offload_device, comfy.ops.manual_cast)
49
+ self.model.eval()
50
+
51
+ self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device)
52
+
53
+ def load_sd(self, sd):
54
+ return self.model.load_state_dict(sd, strict=False)
55
+
56
+ def get_sd(self):
57
+ return self.model.state_dict()
58
+
59
+ def encode_image(self, image, crop=True):
60
+ comfy.model_management.load_model_gpu(self.patcher)
61
+ pixel_values = clip_preprocess(image.to(self.load_device), size=self.image_size, mean=self.image_mean, std=self.image_std, crop=crop).float()
62
+ out = self.model(pixel_values=pixel_values, intermediate_output=-2)
63
+
64
+ outputs = Output()
65
+ outputs["last_hidden_state"] = out[0].to(comfy.model_management.intermediate_device())
66
+ outputs["image_embeds"] = out[2].to(comfy.model_management.intermediate_device())
67
+ outputs["penultimate_hidden_states"] = out[1].to(comfy.model_management.intermediate_device())
68
+ return outputs
69
+
70
+ def convert_to_transformers(sd, prefix):
71
+ sd_k = sd.keys()
72
+ if "{}transformer.resblocks.0.attn.in_proj_weight".format(prefix) in sd_k:
73
+ keys_to_replace = {
74
+ "{}class_embedding".format(prefix): "vision_model.embeddings.class_embedding",
75
+ "{}conv1.weight".format(prefix): "vision_model.embeddings.patch_embedding.weight",
76
+ "{}positional_embedding".format(prefix): "vision_model.embeddings.position_embedding.weight",
77
+ "{}ln_post.bias".format(prefix): "vision_model.post_layernorm.bias",
78
+ "{}ln_post.weight".format(prefix): "vision_model.post_layernorm.weight",
79
+ "{}ln_pre.bias".format(prefix): "vision_model.pre_layrnorm.bias",
80
+ "{}ln_pre.weight".format(prefix): "vision_model.pre_layrnorm.weight",
81
+ }
82
+
83
+ for x in keys_to_replace:
84
+ if x in sd_k:
85
+ sd[keys_to_replace[x]] = sd.pop(x)
86
+
87
+ if "{}proj".format(prefix) in sd_k:
88
+ sd['visual_projection.weight'] = sd.pop("{}proj".format(prefix)).transpose(0, 1)
89
+
90
+ sd = transformers_convert(sd, prefix, "vision_model.", 48)
91
+ else:
92
+ replace_prefix = {prefix: ""}
93
+ sd = state_dict_prefix_replace(sd, replace_prefix)
94
+ return sd
95
+
96
+ def load_clipvision_from_sd(sd, prefix="", convert_keys=False):
97
+ if convert_keys:
98
+ sd = convert_to_transformers(sd, prefix)
99
+ if "vision_model.encoder.layers.47.layer_norm1.weight" in sd:
100
+ json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_g.json")
101
+ elif "vision_model.encoder.layers.30.layer_norm1.weight" in sd:
102
+ json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_h.json")
103
+ elif "vision_model.encoder.layers.22.layer_norm1.weight" in sd:
104
+ if sd["vision_model.encoder.layers.0.layer_norm1.weight"].shape[0] == 1152:
105
+ json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_siglip_384.json")
106
+ elif sd["vision_model.embeddings.position_embedding.weight"].shape[0] == 577:
107
+ json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_vitl_336.json")
108
+ else:
109
+ json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_vitl.json")
110
+ else:
111
+ return None
112
+
113
+ clip = ClipVisionModel(json_config)
114
+ m, u = clip.load_sd(sd)
115
+ if len(m) > 0:
116
+ logging.warning("missing clip vision: {}".format(m))
117
+ u = set(u)
118
+ keys = list(sd.keys())
119
+ for k in keys:
120
+ if k not in u:
121
+ sd.pop(k)
122
+ return clip
123
+
124
+ def load(ckpt_path):
125
+ sd = load_torch_file(ckpt_path)
126
+ if "visual.transformer.resblocks.0.attn.in_proj_weight" in sd:
127
+ return load_clipvision_from_sd(sd, prefix="visual.", convert_keys=True)
128
+ else:
129
+ return load_clipvision_from_sd(sd)
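`clip_preprocess` above scales the shorter image side to the target size and then center-crops the longer one. A sketch of the index arithmetic for a hypothetical 480x640 input at the default `size=224` (the input dimensions are made up for illustration):

```python
# Hypothetical input dimensions; mirrors the scale/crop math in clip_preprocess.
size = 224
h_in, w_in = 480, 640

scale = size / min(h_in, w_in)                           # 224 / 480
scale_size = (round(scale * h_in), round(scale * w_in))  # (224, 299)
h = (scale_size[0] - size) // 2                          # 0: no vertical crop
w = (scale_size[1] - size) // 2                          # 37: crop 37 px from each side
print(scale_size, h, w)  # (224, 299) 0 37
```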
comfy/clip_vision_config_g.json ADDED
@@ -0,0 +1,18 @@
1
+ {
2
+ "attention_dropout": 0.0,
3
+ "dropout": 0.0,
4
+ "hidden_act": "gelu",
5
+ "hidden_size": 1664,
6
+ "image_size": 224,
7
+ "initializer_factor": 1.0,
8
+ "initializer_range": 0.02,
9
+ "intermediate_size": 8192,
10
+ "layer_norm_eps": 1e-05,
11
+ "model_type": "clip_vision_model",
12
+ "num_attention_heads": 16,
13
+ "num_channels": 3,
14
+ "num_hidden_layers": 48,
15
+ "patch_size": 14,
16
+ "projection_dim": 1280,
17
+ "torch_dtype": "float32"
18
+ }
comfy/clip_vision_config_h.json ADDED
@@ -0,0 +1,18 @@
1
+ {
2
+ "attention_dropout": 0.0,
3
+ "dropout": 0.0,
4
+ "hidden_act": "gelu",
5
+ "hidden_size": 1280,
6
+ "image_size": 224,
7
+ "initializer_factor": 1.0,
8
+ "initializer_range": 0.02,
9
+ "intermediate_size": 5120,
10
+ "layer_norm_eps": 1e-05,
11
+ "model_type": "clip_vision_model",
12
+ "num_attention_heads": 16,
13
+ "num_channels": 3,
14
+ "num_hidden_layers": 32,
15
+ "patch_size": 14,
16
+ "projection_dim": 1024,
17
+ "torch_dtype": "float32"
18
+ }
comfy/clip_vision_config_vitl.json ADDED
@@ -0,0 +1,18 @@
1
+ {
2
+ "attention_dropout": 0.0,
3
+ "dropout": 0.0,
4
+ "hidden_act": "quick_gelu",
5
+ "hidden_size": 1024,
6
+ "image_size": 224,
7
+ "initializer_factor": 1.0,
8
+ "initializer_range": 0.02,
9
+ "intermediate_size": 4096,
10
+ "layer_norm_eps": 1e-05,
11
+ "model_type": "clip_vision_model",
12
+ "num_attention_heads": 16,
13
+ "num_channels": 3,
14
+ "num_hidden_layers": 24,
15
+ "patch_size": 14,
16
+ "projection_dim": 768,
17
+ "torch_dtype": "float32"
18
+ }
comfy/clip_vision_config_vitl_336.json ADDED
@@ -0,0 +1,18 @@
1
+ {
2
+ "attention_dropout": 0.0,
3
+ "dropout": 0.0,
4
+ "hidden_act": "quick_gelu",
5
+ "hidden_size": 1024,
6
+ "image_size": 336,
7
+ "initializer_factor": 1.0,
8
+ "initializer_range": 0.02,
9
+ "intermediate_size": 4096,
10
+ "layer_norm_eps": 1e-5,
11
+ "model_type": "clip_vision_model",
12
+ "num_attention_heads": 16,
13
+ "num_channels": 3,
14
+ "num_hidden_layers": 24,
15
+ "patch_size": 14,
16
+ "projection_dim": 768,
17
+ "torch_dtype": "float32"
18
+ }
comfy/clip_vision_siglip_384.json ADDED
@@ -0,0 +1,13 @@
1
+ {
2
+ "num_channels": 3,
3
+ "hidden_act": "gelu_pytorch_tanh",
4
+ "hidden_size": 1152,
5
+ "image_size": 384,
6
+ "intermediate_size": 4304,
7
+ "model_type": "siglip_vision_model",
8
+ "num_attention_heads": 16,
9
+ "num_hidden_layers": 27,
10
+ "patch_size": 14,
11
+ "image_mean": [0.5, 0.5, 0.5],
12
+ "image_std": [0.5, 0.5, 0.5]
13
+ }
comfy/comfy_types/README.md ADDED
@@ -0,0 +1,43 @@
1
+ # Comfy Typing
2
+ ## Type hinting for ComfyUI Node development
3
+
4
+ This module provides type hinting and concrete convenience types for node developers.
5
+ If cloned to the custom_nodes directory of ComfyUI, types can be imported using:
6
+
7
+ ```python
8
+ from comfy.comfy_types import IO, ComfyNodeABC, CheckLazyMixin, InputTypeDict
9
+
10
+ class ExampleNode(ComfyNodeABC):
11
+ @classmethod
12
+ def INPUT_TYPES(s) -> InputTypeDict:
13
+ return {"required": {}}
14
+ ```
15
+
16
+ Full example is in [examples/example_nodes.py](examples/example_nodes.py).
17
+
18
+ # Types
19
+ A few primary types are documented below. More complete information is available via the docstrings on each type.
20
+
21
+ ## `IO`
22
+
23
+ A string enum of built-in and a few custom data types. Includes the following special types and their requisite plumbing:
24
+
25
+ - `ANY`: `"*"`
26
+ - `NUMBER`: `"FLOAT,INT"`
27
+ - `PRIMITIVE`: `"STRING,FLOAT,INT,BOOLEAN"`
28
+
29
+ ## `ComfyNodeABC`
30
+
31
+ An abstract base class for nodes, offering type-hinting / autocomplete, and somewhat-alright docstrings.
32
+
33
+ ### Type hinting for `INPUT_TYPES`
34
+
35
+ ![INPUT_TYPES auto-completion in Visual Studio Code](examples/input_types.png)
36
+
37
+ ### `INPUT_TYPES` return dict
38
+
39
+ ![INPUT_TYPES return value type hinting in Visual Studio Code](examples/required_hint.png)
40
+
41
+ ### Options for individual inputs
42
+
43
+ ![INPUT_TYPES return value option auto-completion in Visual Studio Code](examples/input_options.png)
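The `NUMBER` and `PRIMITIVE` aliases work because `IO` (defined in `node_typing.py` below) overrides `__ne__` so that comma-separated type strings compare as compatible when one set of types contains the other, and `"*"` matches everything. A short sketch of that behavior, assuming the same import path as the example above:

```python
from comfy.comfy_types import IO

print(IO.NUMBER != "FLOAT")  # False: {"FLOAT"} is a subset of {"FLOAT", "INT"}
print(IO.ANY != "LATENT")    # False: "*" is compatible with any type
print(IO.INT != "STRING")    # True: disjoint types stay incompatible
```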
comfy/comfy_types/__init__.py ADDED
@@ -0,0 +1,45 @@
1
+ import torch
2
+ from typing import Callable, Protocol, TypedDict, Optional, List
3
+ from .node_typing import IO, InputTypeDict, ComfyNodeABC, CheckLazyMixin
4
+
5
+
6
+ class UnetApplyFunction(Protocol):
7
+ """Function signature protocol on comfy.model_base.BaseModel.apply_model"""
8
+
9
+ def __call__(self, x: torch.Tensor, t: torch.Tensor, **kwargs) -> torch.Tensor:
10
+ pass
11
+
12
+
13
+ class UnetApplyConds(TypedDict):
14
+ """Optional conditions for unet apply function."""
15
+
16
+ c_concat: Optional[torch.Tensor]
17
+ c_crossattn: Optional[torch.Tensor]
18
+ control: Optional[torch.Tensor]
19
+ transformer_options: Optional[dict]
20
+
21
+
22
+ class UnetParams(TypedDict):
23
+ # Tensor of shape [B, C, H, W]
24
+ input: torch.Tensor
25
+ # Tensor of shape [B]
26
+ timestep: torch.Tensor
27
+ c: UnetApplyConds
28
+ # List of [0, 1], [0], [1], ...
29
+ # 0 means conditional, 1 means unconditional
30
+ cond_or_uncond: List[int]
31
+
32
+
33
+ UnetWrapperFunction = Callable[[UnetApplyFunction, UnetParams], torch.Tensor]
34
+
35
+
36
+ __all__ = [
37
+ "UnetWrapperFunction",
38
+ UnetApplyConds.__name__,
39
+ UnetParams.__name__,
40
+ UnetApplyFunction.__name__,
41
+ IO.__name__,
42
+ InputTypeDict.__name__,
43
+ ComfyNodeABC.__name__,
44
+ CheckLazyMixin.__name__,
45
+ ]
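`UnetWrapperFunction` is the signature a custom sampling wrapper must satisfy: it receives the model's apply function plus a `UnetParams` dict and returns the denoised tensor. A minimal pass-through sketch under those type definitions (the function name is hypothetical; ComfyUI supplies `apply_model` and `params` at sampling time, and such a wrapper is typically registered via `ModelPatcher.set_model_unet_function_wrapper`):

```python
def my_unet_wrapper(apply_model, params):
    """Pass-through wrapper matching the UnetWrapperFunction alias above."""
    x = params["input"]            # latent tensor of shape [B, C, H, W]
    t = params["timestep"]         # timestep tensor of shape [B]
    c = params["c"]                # UnetApplyConds, forwarded as keyword arguments
    return apply_model(x, t, **c)  # defer to the original model unchanged
```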
comfy/comfy_types/examples/example_nodes.py ADDED
@@ -0,0 +1,28 @@
1
+ from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict
2
+ from inspect import cleandoc
3
+
4
+
5
+ class ExampleNode(ComfyNodeABC):
6
+ """An example node that just adds 1 to an input integer.
7
+
8
+ * Requires a modern IDE to provide any benefit (detail: an IDE configured with analysis paths, etc.).
9
+ * This node is intended as an example for developers only.
10
+ """
11
+
12
+ DESCRIPTION = cleandoc(__doc__)
13
+ CATEGORY = "examples"
14
+
15
+ @classmethod
16
+ def INPUT_TYPES(s) -> InputTypeDict:
17
+ return {
18
+ "required": {
19
+ "input_int": (IO.INT, {"defaultInput": True}),
20
+ }
21
+ }
22
+
23
+ RETURN_TYPES = (IO.INT,)
24
+ RETURN_NAMES = ("input_plus_one",)
25
+ FUNCTION = "execute"
26
+
27
+ def execute(self, input_int: int):
28
+ return (input_int + 1,)
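For the node above to show up in ComfyUI, the custom node package would still need to export it through the standard mapping dictionaries, typically in the package's `__init__.py`. A sketch (the display name is arbitrary):

```python
from .example_nodes import ExampleNode

# ComfyUI discovers custom nodes through these module-level dictionaries.
NODE_CLASS_MAPPINGS = {"ExampleNode": ExampleNode}
NODE_DISPLAY_NAME_MAPPINGS = {"ExampleNode": "Example Node (+1)"}
```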
comfy/comfy_types/examples/input_options.png ADDED
comfy/comfy_types/examples/input_types.png ADDED
comfy/comfy_types/examples/required_hint.png ADDED
comfy/comfy_types/node_typing.py ADDED
@@ -0,0 +1,274 @@
1
+ """Comfy-specific type hinting"""
2
+
3
+ from __future__ import annotations
4
+ from typing import Literal, TypedDict
5
+ from abc import ABC, abstractmethod
6
+ from enum import Enum
7
+
8
+
9
+ class StrEnum(str, Enum):
10
+ """Base class for string enums. Python's StrEnum is not available until 3.11."""
11
+
12
+ def __str__(self) -> str:
13
+ return self.value
14
+
15
+
16
+ class IO(StrEnum):
17
+ """Node input/output data types.
18
+
19
+ Includes functionality for ``"*"`` (`ANY`) and ``"MULTI,TYPES"``.
20
+ """
21
+
22
+ STRING = "STRING"
23
+ IMAGE = "IMAGE"
24
+ MASK = "MASK"
25
+ LATENT = "LATENT"
26
+ BOOLEAN = "BOOLEAN"
27
+ INT = "INT"
28
+ FLOAT = "FLOAT"
29
+ CONDITIONING = "CONDITIONING"
30
+ SAMPLER = "SAMPLER"
31
+ SIGMAS = "SIGMAS"
32
+ GUIDER = "GUIDER"
33
+ NOISE = "NOISE"
34
+ CLIP = "CLIP"
35
+ CONTROL_NET = "CONTROL_NET"
36
+ VAE = "VAE"
37
+ MODEL = "MODEL"
38
+ CLIP_VISION = "CLIP_VISION"
39
+ CLIP_VISION_OUTPUT = "CLIP_VISION_OUTPUT"
40
+ STYLE_MODEL = "STYLE_MODEL"
41
+ GLIGEN = "GLIGEN"
42
+ UPSCALE_MODEL = "UPSCALE_MODEL"
43
+ AUDIO = "AUDIO"
44
+ WEBCAM = "WEBCAM"
45
+ POINT = "POINT"
46
+ FACE_ANALYSIS = "FACE_ANALYSIS"
47
+ BBOX = "BBOX"
48
+ SEGS = "SEGS"
49
+
50
+ ANY = "*"
51
+ """Always matches any type, but at a price.
52
+
53
+ Causes some functionality issues (e.g. reroutes, link types), and should be avoided whenever possible.
54
+ """
55
+ NUMBER = "FLOAT,INT"
56
+ """A float or an int - could be either"""
57
+ PRIMITIVE = "STRING,FLOAT,INT,BOOLEAN"
58
+ """Could be any of: string, float, int, or bool"""
59
+
60
+ def __ne__(self, value: object) -> bool:
61
+ if self == "*" or value == "*":
62
+ return False
63
+ if not isinstance(value, str):
64
+ return True
65
+ a = frozenset(self.split(","))
66
+ b = frozenset(value.split(","))
67
+ return not (b.issubset(a) or a.issubset(b))
68
+
69
+
70
+ class InputTypeOptions(TypedDict):
71
+ """Provides type hinting for the return type of the INPUT_TYPES node function.
72
+
73
+ Due to IDE limitations with unions, for now all options are available for all types (e.g. `label_on` is hinted even when the type is not `IO.BOOLEAN`).
74
+
75
+ Comfy Docs: https://docs.comfy.org/essentials/custom_node_datatypes
76
+ """
77
+
78
+ default: bool | str | float | int | list | tuple
79
+ """The default value of the widget"""
80
+ defaultInput: bool
81
+ """Defaults to an input slot rather than a widget"""
82
+ forceInput: bool
83
+ """`defaultInput` and also don't allow converting to a widget"""
84
+ lazy: bool
85
+ """Declares that this input uses lazy evaluation"""
86
+ rawLink: bool
87
+ """When a link exists, rather than receiving the evaluated value, you will receive the link (i.e. `["nodeId", <outputIndex>]`). Designed for node expansion."""
88
+ tooltip: str
89
+ """Tooltip for the input (or widget), shown on pointer hover"""
90
+ # class InputTypeNumber(InputTypeOptions):
91
+ # default: float | int
92
+ min: float
93
+ """The minimum value of a number (``FLOAT`` | ``INT``)"""
94
+ max: float
95
+ """The maximum value of a number (``FLOAT`` | ``INT``)"""
96
+ step: float
97
+ """The amount to increment or decrement a widget by when stepping up/down (``FLOAT`` | ``INT``)"""
98
+ round: float
99
+ """Floats are rounded by this value (``FLOAT``)"""
100
+ # class InputTypeBoolean(InputTypeOptions):
101
+ # default: bool
102
+ label_on: str
103
+ """The label to use in the UI when the bool is True (``BOOLEAN``)"""
104
+ label_on: str
105
+ """The label to use in the UI when the bool is False (``BOOLEAN``)"""
106
+ # class InputTypeString(InputTypeOptions):
107
+ # default: str
108
+ multiline: bool
109
+ """Use a multiline text box (``STRING``)"""
110
+ placeholder: str
111
+ """Placeholder text to display in the UI when empty (``STRING``)"""
112
+ # Deprecated:
113
+ # defaultVal: str
114
+ dynamicPrompts: bool
115
+ """Causes the front-end to evaluate dynamic prompts (``STRING``)"""
116
+
117
+
118
+ class HiddenInputTypeDict(TypedDict):
119
+ """Provides type hinting for the hidden entry of node INPUT_TYPES."""
120
+
121
+ node_id: Literal["UNIQUE_ID"]
122
+ """UNIQUE_ID is the unique identifier of the node, and matches the id property of the node on the client side. It is commonly used in client-server communications (see messages)."""
123
+ unique_id: Literal["UNIQUE_ID"]
124
+ """UNIQUE_ID is the unique identifier of the node, and matches the id property of the node on the client side. It is commonly used in client-server communications (see messages)."""
125
+ prompt: Literal["PROMPT"]
126
+ """PROMPT is the complete prompt sent by the client to the server. See the prompt object for a full description."""
127
+ extra_pnginfo: Literal["EXTRA_PNGINFO"]
128
+ """EXTRA_PNGINFO is a dictionary that will be copied into the metadata of any .png files saved. Custom nodes can store additional information in this dictionary for saving (or as a way to communicate with a downstream node)."""
129
+ dynprompt: Literal["DYNPROMPT"]
130
+ """DYNPROMPT is an instance of comfy_execution.graph.DynamicPrompt. It differs from PROMPT in that it may mutate during the course of execution in response to Node Expansion."""
131
+
132
+
133
+ class InputTypeDict(TypedDict):
134
+ """Provides type hinting for node INPUT_TYPES.
135
+
136
+ Comfy Docs: https://docs.comfy.org/essentials/custom_node_more_on_inputs
137
+ """
138
+
139
+ required: dict[str, tuple[IO, InputTypeOptions]]
140
+ """Describes all inputs that must be connected for the node to execute."""
141
+ optional: dict[str, tuple[IO, InputTypeOptions]]
142
+ """Describes inputs which do not need to be connected."""
143
+ hidden: HiddenInputTypeDict
144
+ """Offers advanced functionality and server-client communication.
145
+
146
+ Comfy Docs: https://docs.comfy.org/essentials/custom_node_more_on_inputs#hidden-inputs
147
+ """
148
+
149
+
150
+ class ComfyNodeABC(ABC):
151
+ """Abstract base class for Comfy nodes. Includes the names and expected types of attributes.
152
+
153
+ Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview
154
+ """
155
+
156
+ DESCRIPTION: str
157
+ """Node description, shown as a tooltip when hovering over the node.
158
+
159
+ Usage::
160
+
161
+ # Explicitly define the description
162
+ DESCRIPTION = "Example description here."
163
+
164
+ # Use the docstring of the node class.
165
+ DESCRIPTION = cleandoc(__doc__)
166
+ """
167
+ CATEGORY: str
168
+ """The category of the node, as per the "Add Node" menu.
169
+
170
+ Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#category
171
+ """
172
+ EXPERIMENTAL: bool
173
+ """Flags a node as experimental, informing users that it may change or not work as expected."""
174
+ DEPRECATED: bool
175
+ """Flags a node as deprecated, indicating to users that they should find alternatives to this node."""
176
+
177
+ @classmethod
178
+ @abstractmethod
179
+ def INPUT_TYPES(s) -> InputTypeDict:
180
+ """Defines node inputs.
181
+
182
+ * Must include the ``required`` key, which describes all inputs that must be connected for the node to execute.
183
+ * The ``optional`` key can be added to describe inputs which do not need to be connected.
184
+ * The ``hidden`` key offers some advanced functionality. More info at: https://docs.comfy.org/essentials/custom_node_more_on_inputs#hidden-inputs
185
+
186
+ Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#input-types
187
+ """
188
+ return {"required": {}}
189
+
190
+ OUTPUT_NODE: bool
191
+ """Flags this node as an output node, causing any inputs it requires to be executed.
192
+
193
+ If a node is not connected to any output nodes, that node will not be executed. Usage::
194
+
195
+ OUTPUT_NODE = True
196
+
197
+ From the docs:
198
+
199
+ By default, a node is not considered an output. Set ``OUTPUT_NODE = True`` to specify that it is.
200
+
201
+ Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#output-node
202
+ """
203
+ INPUT_IS_LIST: bool
204
+ """A flag indicating if this node implements the additional code necessary to deal with OUTPUT_IS_LIST nodes.
205
+
206
+ All inputs of ``type`` will become ``list[type]``, regardless of how many items are passed in. This also affects ``check_lazy_status``.
207
+
208
+ From the docs:
209
+
210
+ A node can also override the default input behaviour and receive the whole list in a single call. This is done by setting a class attribute `INPUT_IS_LIST` to ``True``.
211
+
212
+ Comfy Docs: https://docs.comfy.org/essentials/custom_node_lists#list-processing
213
+ """
214
+ OUTPUT_IS_LIST: tuple[bool]
215
+ """A tuple indicating which node outputs are lists, but will be connected to nodes that expect individual items.
216
+
217
+ Connected nodes that do not implement `INPUT_IS_LIST` will be executed once for every item in the list.
218
+
219
+ A ``tuple[bool]``, where the items match those in `RETURN_TYPES`::
220
+
221
+ RETURN_TYPES = (IO.INT, IO.INT, IO.STRING)
222
+ OUTPUT_IS_LIST = (True, True, False) # The string output will be handled normally
223
+
224
+ From the docs:
225
+
226
+ In order to tell Comfy that the list being returned should not be wrapped, but treated as a series of data for sequential processing,
227
+ the node should provide a class attribute `OUTPUT_IS_LIST`, which is a ``tuple[bool]``, of the same length as `RETURN_TYPES`,
228
+ specifying which outputs should be so treated.
229
+
230
+ Comfy Docs: https://docs.comfy.org/essentials/custom_node_lists#list-processing
231
+ """
232
+
233
+ RETURN_TYPES: tuple[IO]
234
+ """A tuple representing the outputs of this node.
235
+
236
+ Usage::
237
+
238
+ RETURN_TYPES = (IO.INT, "INT", "CUSTOM_TYPE")
239
+
240
+ Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#return-types
241
+ """
242
+ RETURN_NAMES: tuple[str]
243
+ """The output slot names for each item in `RETURN_TYPES`, e.g. ``RETURN_NAMES = ("count", "filter_string")``
244
+
245
+ Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#return-names
246
+ """
247
+ OUTPUT_TOOLTIPS: tuple[str]
248
+ """A tuple of strings to use as tooltips for node outputs, one for each item in `RETURN_TYPES`."""
249
+ FUNCTION: str
250
+ """The name of the function to execute as a literal string, e.g. `FUNCTION = "execute"`
251
+
252
+ Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#function
253
+ """
254
+
255
+
256
+ class CheckLazyMixin:
257
+ """Provides a basic check_lazy_status implementation and type hinting for nodes that use lazy inputs."""
258
+
259
+ def check_lazy_status(self, **kwargs) -> list[str]:
260
+ """Returns a list of input names that should be evaluated.
261
+
262
+ This basic mixin implementation requires all inputs.
263
+
264
+ :kwargs: All node inputs will be included here. If the input is ``None``, it should be assumed that it has not yet been evaluated. \
265
+ When using ``INPUT_IS_LIST = True``, unevaluated inputs will instead be ``(None,)``.
266
+
267
+ Params should match the node's execution ``FUNCTION`` (self, and all inputs by name).
268
+ Will be executed repeatedly until it returns an empty list, or all requested items were already evaluated (and sent as params).
269
+
270
+ Comfy Docs: https://docs.comfy.org/essentials/custom_node_lazy_evaluation#defining-check-lazy-status
271
+ """
272
+
273
+ need = [name for name in kwargs if kwargs[name] is None]
274
+ return need
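`CheckLazyMixin`'s default implementation simply requests every still-unevaluated input. A node that actually benefits from laziness overrides `check_lazy_status` to request only what it needs. A sketch of a lazy switch node built on the types above (names are illustrative, and `IO.ANY` is used here only for brevity):

```python
from comfy.comfy_types import IO, ComfyNodeABC, CheckLazyMixin, InputTypeDict

class LazySwitch(CheckLazyMixin, ComfyNodeABC):
    CATEGORY = "examples"
    RETURN_TYPES = (IO.ANY,)
    FUNCTION = "execute"

    @classmethod
    def INPUT_TYPES(s) -> InputTypeDict:
        return {"required": {
            "condition": (IO.BOOLEAN, {}),
            "on_true": (IO.ANY, {"lazy": True}),
            "on_false": (IO.ANY, {"lazy": True}),
        }}

    def check_lazy_status(self, condition, on_true=None, on_false=None):
        # Only request evaluation of the branch that will actually be returned.
        branch = on_true if condition else on_false
        return ["on_true" if condition else "on_false"] if branch is None else []

    def execute(self, condition, on_true=None, on_false=None):
        return (on_true if condition else on_false,)
```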
comfy/conds.py ADDED
@@ -0,0 +1,83 @@
1
+ import torch
2
+ import math
3
+ import comfy.utils
4
+
5
+
6
+ def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9)
7
+ return abs(a*b) // math.gcd(a, b)
8
+
9
+ class CONDRegular:
10
+ def __init__(self, cond):
11
+ self.cond = cond
12
+
13
+ def _copy_with(self, cond):
14
+ return self.__class__(cond)
15
+
16
+ def process_cond(self, batch_size, device, **kwargs):
17
+ return self._copy_with(comfy.utils.repeat_to_batch_size(self.cond, batch_size).to(device))
18
+
19
+ def can_concat(self, other):
20
+ if self.cond.shape != other.cond.shape:
21
+ return False
22
+ return True
23
+
24
+ def concat(self, others):
25
+ conds = [self.cond]
26
+ for x in others:
27
+ conds.append(x.cond)
28
+ return torch.cat(conds)
29
+
30
+ class CONDNoiseShape(CONDRegular):
31
+ def process_cond(self, batch_size, device, area, **kwargs):
32
+ data = self.cond
33
+ if area is not None:
34
+ dims = len(area) // 2
35
+ for i in range(dims):
36
+ data = data.narrow(i + 2, area[i + dims], area[i])
37
+
38
+ return self._copy_with(comfy.utils.repeat_to_batch_size(data, batch_size).to(device))
39
+
40
+
41
+ class CONDCrossAttn(CONDRegular):
42
+ def can_concat(self, other):
43
+ s1 = self.cond.shape
44
+ s2 = other.cond.shape
45
+ if s1 != s2:
46
+ if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen
47
+ return False
48
+
49
+ mult_min = lcm(s1[1], s2[1])
50
+ diff = mult_min // min(s1[1], s2[1])
51
+ if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much
52
+ return False
53
+ return True
54
+
55
+ def concat(self, others):
56
+ conds = [self.cond]
57
+ crossattn_max_len = self.cond.shape[1]
58
+ for x in others:
59
+ c = x.cond
60
+ crossattn_max_len = lcm(crossattn_max_len, c.shape[1])
61
+ conds.append(c)
62
+
63
+ out = []
64
+ for c in conds:
65
+ if c.shape[1] < crossattn_max_len:
66
+ c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result
67
+ out.append(c)
68
+ return torch.cat(out)
69
+
70
+ class CONDConstant(CONDRegular):
71
+ def __init__(self, cond):
72
+ self.cond = cond
73
+
74
+ def process_cond(self, batch_size, device, **kwargs):
75
+ return self._copy_with(self.cond)
76
+
77
+ def can_concat(self, other):
78
+ if self.cond != other.cond:
79
+ return False
80
+ return True
81
+
82
+ def concat(self, others):
83
+ return self.cond
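The padding rule in `CONDCrossAttn.concat` is worth seeing concretely: sequence lengths are repeated up to their least common multiple before concatenation, which (for cross-attention) leaves results unchanged. A small illustration, assuming `comfy.conds` is importable:

```python
import torch
from comfy.conds import CONDCrossAttn

a = CONDCrossAttn(torch.zeros(1, 77, 768))
b = CONDCrossAttn(torch.zeros(1, 154, 768))

assert a.can_concat(b)   # lcm(77, 154) / 77 == 2, within the padding limit of 4
merged = a.concat([b])
print(merged.shape)      # torch.Size([2, 154, 768]); `a` was repeated to length 154
```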
comfy/controlnet.py ADDED
@@ -0,0 +1,862 @@
1
+ """
2
+ This file is part of ComfyUI.
3
+ Copyright (C) 2024 Comfy
4
+
5
+ This program is free software: you can redistribute it and/or modify
6
+ it under the terms of the GNU General Public License as published by
7
+ the Free Software Foundation, either version 3 of the License, or
8
+ (at your option) any later version.
9
+
10
+ This program is distributed in the hope that it will be useful,
11
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
+ GNU General Public License for more details.
14
+
15
+ You should have received a copy of the GNU General Public License
16
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
17
+ """
18
+
19
+
20
+ import torch
21
+ from enum import Enum
22
+ import math
23
+ import os
24
+ import logging
25
+ import comfy.utils
26
+ import comfy.model_management
27
+ import comfy.model_detection
28
+ import comfy.model_patcher
29
+ import comfy.ops
30
+ import comfy.latent_formats
31
+
32
+ import comfy.cldm.cldm
33
+ import comfy.t2i_adapter.adapter
34
+ import comfy.ldm.cascade.controlnet
35
+ import comfy.cldm.mmdit
36
+ import comfy.ldm.hydit.controlnet
37
+ import comfy.ldm.flux.controlnet
38
+ import comfy.cldm.dit_embedder
39
+ from typing import TYPE_CHECKING
40
+ if TYPE_CHECKING:
41
+ from comfy.hooks import HookGroup
42
+
43
+
44
+ def broadcast_image_to(tensor, target_batch_size, batched_number):
45
+ current_batch_size = tensor.shape[0]
46
+ #print(current_batch_size, target_batch_size)
47
+ if current_batch_size == 1:
48
+ return tensor
49
+
50
+ per_batch = target_batch_size // batched_number
51
+ tensor = tensor[:per_batch]
52
+
53
+ if per_batch > tensor.shape[0]:
54
+ tensor = torch.cat([tensor] * (per_batch // tensor.shape[0]) + [tensor[:(per_batch % tensor.shape[0])]], dim=0)
55
+
56
+ current_batch_size = tensor.shape[0]
57
+ if current_batch_size == target_batch_size:
58
+ return tensor
59
+ else:
60
+ return torch.cat([tensor] * batched_number, dim=0)
61
+
62
+ class StrengthType(Enum):
63
+ CONSTANT = 1
64
+ LINEAR_UP = 2
65
+
66
+ class ControlBase:
67
+ def __init__(self):
68
+ self.cond_hint_original = None
69
+ self.cond_hint = None
70
+ self.strength = 1.0
71
+ self.timestep_percent_range = (0.0, 1.0)
72
+ self.latent_format = None
73
+ self.vae = None
74
+ self.global_average_pooling = False
75
+ self.timestep_range = None
76
+ self.compression_ratio = 8
77
+ self.upscale_algorithm = 'nearest-exact'
78
+ self.extra_args = {}
79
+ self.previous_controlnet = None
80
+ self.extra_conds = []
81
+ self.strength_type = StrengthType.CONSTANT
82
+ self.concat_mask = False
83
+ self.extra_concat_orig = []
84
+ self.extra_concat = None
85
+ self.extra_hooks: HookGroup = None
86
+ self.preprocess_image = lambda a: a
87
+
88
+ def set_cond_hint(self, cond_hint, strength=1.0, timestep_percent_range=(0.0, 1.0), vae=None, extra_concat=[]):
89
+ self.cond_hint_original = cond_hint
90
+ self.strength = strength
91
+ self.timestep_percent_range = timestep_percent_range
92
+ if self.latent_format is not None:
93
+ if vae is None:
94
+ logging.warning("WARNING: no VAE provided to the controlnet apply node when this controlnet requires one.")
95
+ self.vae = vae
96
+ self.extra_concat_orig = extra_concat.copy()
97
+ if self.concat_mask and len(self.extra_concat_orig) == 0:
98
+ self.extra_concat_orig.append(torch.tensor([[[[1.0]]]]))
99
+ return self
100
+
101
+ def pre_run(self, model, percent_to_timestep_function):
102
+ self.timestep_range = (percent_to_timestep_function(self.timestep_percent_range[0]), percent_to_timestep_function(self.timestep_percent_range[1]))
103
+ if self.previous_controlnet is not None:
104
+ self.previous_controlnet.pre_run(model, percent_to_timestep_function)
105
+
106
+ def set_previous_controlnet(self, controlnet):
107
+ self.previous_controlnet = controlnet
108
+ return self
109
+
110
+ def cleanup(self):
111
+ if self.previous_controlnet is not None:
112
+ self.previous_controlnet.cleanup()
113
+
114
+ self.cond_hint = None
115
+ self.extra_concat = None
116
+ self.timestep_range = None
117
+
118
+ def get_models(self):
119
+ out = []
120
+ if self.previous_controlnet is not None:
121
+ out += self.previous_controlnet.get_models()
122
+ return out
123
+
124
+ def get_extra_hooks(self):
125
+ out = []
126
+ if self.extra_hooks is not None:
127
+ out.append(self.extra_hooks)
128
+ if self.previous_controlnet is not None:
129
+ out += self.previous_controlnet.get_extra_hooks()
130
+ return out
131
+
132
+ def copy_to(self, c):
133
+ c.cond_hint_original = self.cond_hint_original
134
+ c.strength = self.strength
135
+ c.timestep_percent_range = self.timestep_percent_range
136
+ c.global_average_pooling = self.global_average_pooling
137
+ c.compression_ratio = self.compression_ratio
138
+ c.upscale_algorithm = self.upscale_algorithm
139
+ c.latent_format = self.latent_format
140
+ c.extra_args = self.extra_args.copy()
141
+ c.vae = self.vae
142
+ c.extra_conds = self.extra_conds.copy()
143
+ c.strength_type = self.strength_type
144
+ c.concat_mask = self.concat_mask
145
+ c.extra_concat_orig = self.extra_concat_orig.copy()
146
+ c.extra_hooks = self.extra_hooks.clone() if self.extra_hooks else None
147
+ c.preprocess_image = self.preprocess_image
148
+
149
+ def inference_memory_requirements(self, dtype):
150
+ if self.previous_controlnet is not None:
151
+ return self.previous_controlnet.inference_memory_requirements(dtype)
152
+ return 0
153
+
154
+ def control_merge(self, control, control_prev, output_dtype):
155
+ out = {'input':[], 'middle':[], 'output': []}
156
+
157
+ for key in control:
158
+ control_output = control[key]
159
+ applied_to = set()
160
+ for i in range(len(control_output)):
161
+ x = control_output[i]
162
+ if x is not None:
163
+ if self.global_average_pooling:
164
+ x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3])
165
+
166
+ if x not in applied_to: #memory saving strategy, allow shared tensors and only apply strength to shared tensors once
167
+ applied_to.add(x)
168
+ if self.strength_type == StrengthType.CONSTANT:
169
+ x *= self.strength
170
+ elif self.strength_type == StrengthType.LINEAR_UP:
171
+ x *= (self.strength ** float(len(control_output) - i))
172
+
173
+ if output_dtype is not None and x.dtype != output_dtype:
174
+ x = x.to(output_dtype)
175
+
176
+ out[key].append(x)
177
+
178
+ if control_prev is not None:
179
+ for x in ['input', 'middle', 'output']:
180
+ o = out[x]
181
+ for i in range(len(control_prev[x])):
182
+ prev_val = control_prev[x][i]
183
+ if i >= len(o):
184
+ o.append(prev_val)
185
+ elif prev_val is not None:
186
+ if o[i] is None:
187
+ o[i] = prev_val
188
+ else:
189
+ if o[i].shape[0] < prev_val.shape[0]:
190
+ o[i] = prev_val + o[i]
191
+ else:
192
+ o[i] = prev_val + o[i] #TODO: change back to inplace add if shared tensors stop being an issue
193
+ return out
194
+
195
+ def set_extra_arg(self, argument, value=None):
196
+ self.extra_args[argument] = value
197
+
198
+
199
+ class ControlNet(ControlBase):
200
+ def __init__(self, control_model=None, global_average_pooling=False, compression_ratio=8, latent_format=None, load_device=None, manual_cast_dtype=None, extra_conds=["y"], strength_type=StrengthType.CONSTANT, concat_mask=False, preprocess_image=lambda a: a):
201
+ super().__init__()
202
+ self.control_model = control_model
203
+ self.load_device = load_device
204
+ if control_model is not None:
205
+ self.control_model_wrapped = comfy.model_patcher.ModelPatcher(self.control_model, load_device=load_device, offload_device=comfy.model_management.unet_offload_device())
206
+
207
+ self.compression_ratio = compression_ratio
208
+ self.global_average_pooling = global_average_pooling
209
+ self.model_sampling_current = None
210
+ self.manual_cast_dtype = manual_cast_dtype
211
+ self.latent_format = latent_format
212
+ self.extra_conds += extra_conds
213
+ self.strength_type = strength_type
214
+ self.concat_mask = concat_mask
215
+ self.preprocess_image = preprocess_image
216
+
217
+ def get_control(self, x_noisy, t, cond, batched_number, transformer_options):
218
+ control_prev = None
219
+ if self.previous_controlnet is not None:
220
+ control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number, transformer_options)
221
+
222
+ if self.timestep_range is not None:
223
+ if t[0] > self.timestep_range[0] or t[0] < self.timestep_range[1]:
224
+ if control_prev is not None:
225
+ return control_prev
226
+ else:
227
+ return None
228
+
229
+ dtype = self.control_model.dtype
230
+ if self.manual_cast_dtype is not None:
231
+ dtype = self.manual_cast_dtype
232
+
233
+ if self.cond_hint is None or x_noisy.shape[2] * self.compression_ratio != self.cond_hint.shape[2] or x_noisy.shape[3] * self.compression_ratio != self.cond_hint.shape[3]:
234
+ if self.cond_hint is not None:
235
+ del self.cond_hint
236
+ self.cond_hint = None
237
+ compression_ratio = self.compression_ratio
238
+ if self.vae is not None:
239
+ compression_ratio *= self.vae.downscale_ratio
240
+ else:
241
+ if self.latent_format is not None:
242
+ raise ValueError("This Controlnet needs a VAE but none was provided, please use a ControlNetApply node with a VAE input and connect it.")
243
+ self.cond_hint = comfy.utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * compression_ratio, x_noisy.shape[2] * compression_ratio, self.upscale_algorithm, "center")
244
+ self.cond_hint = self.preprocess_image(self.cond_hint)
245
+ if self.vae is not None:
246
+ loaded_models = comfy.model_management.loaded_models(only_currently_used=True)
247
+ self.cond_hint = self.vae.encode(self.cond_hint.movedim(1, -1))
248
+ comfy.model_management.load_models_gpu(loaded_models)
249
+ if self.latent_format is not None:
250
+ self.cond_hint = self.latent_format.process_in(self.cond_hint)
251
+ if len(self.extra_concat_orig) > 0:
252
+ to_concat = []
253
+ for c in self.extra_concat_orig:
254
+ c = c.to(self.cond_hint.device)
255
+ c = comfy.utils.common_upscale(c, self.cond_hint.shape[3], self.cond_hint.shape[2], self.upscale_algorithm, "center")
256
+ to_concat.append(comfy.utils.repeat_to_batch_size(c, self.cond_hint.shape[0]))
257
+ self.cond_hint = torch.cat([self.cond_hint] + to_concat, dim=1)
258
+
259
+ self.cond_hint = self.cond_hint.to(device=x_noisy.device, dtype=dtype)
260
+ if x_noisy.shape[0] != self.cond_hint.shape[0]:
261
+ self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
262
+
263
+ context = cond.get('crossattn_controlnet', cond['c_crossattn'])
264
+ extra = self.extra_args.copy()
265
+ for c in self.extra_conds:
266
+ temp = cond.get(c, None)
267
+ if temp is not None:
268
+ extra[c] = temp.to(dtype)
269
+
270
+ timestep = self.model_sampling_current.timestep(t)
271
+ x_noisy = self.model_sampling_current.calculate_input(t, x_noisy)
272
+
273
+ control = self.control_model(x=x_noisy.to(dtype), hint=self.cond_hint, timesteps=timestep.to(dtype), context=context.to(dtype), **extra)
274
+ return self.control_merge(control, control_prev, output_dtype=None)
275
+
276
+ def copy(self):
277
+ c = ControlNet(None, global_average_pooling=self.global_average_pooling, load_device=self.load_device, manual_cast_dtype=self.manual_cast_dtype)
278
+ c.control_model = self.control_model
279
+ c.control_model_wrapped = self.control_model_wrapped
280
+ self.copy_to(c)
281
+ return c
282
+
283
+ def get_models(self):
284
+ out = super().get_models()
285
+ out.append(self.control_model_wrapped)
286
+ return out
287
+
288
+ def pre_run(self, model, percent_to_timestep_function):
289
+ super().pre_run(model, percent_to_timestep_function)
290
+ self.model_sampling_current = model.model_sampling
291
+
292
+ def cleanup(self):
293
+ self.model_sampling_current = None
294
+ super().cleanup()
295
+
296
+ class ControlLoraOps:
297
+ class Linear(torch.nn.Module, comfy.ops.CastWeightBiasOp):
298
+ def __init__(self, in_features: int, out_features: int, bias: bool = True,
299
+ device=None, dtype=None) -> None:
300
+ super().__init__()
301
+ self.in_features = in_features
302
+ self.out_features = out_features
303
+ self.weight = None
304
+ self.up = None
305
+ self.down = None
306
+ self.bias = None
307
+
308
+ def forward(self, input):
309
+ weight, bias = comfy.ops.cast_bias_weight(self, input)
310
+ if self.up is not None:
311
+ return torch.nn.functional.linear(input, weight + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), bias)
312
+ else:
313
+ return torch.nn.functional.linear(input, weight, bias)
314
+
315
+ class Conv2d(torch.nn.Module, comfy.ops.CastWeightBiasOp):
316
+ def __init__(
317
+ self,
318
+ in_channels,
319
+ out_channels,
320
+ kernel_size,
321
+ stride=1,
322
+ padding=0,
323
+ dilation=1,
324
+ groups=1,
325
+ bias=True,
326
+ padding_mode='zeros',
327
+ device=None,
328
+ dtype=None
329
+ ):
330
+ super().__init__()
331
+ self.in_channels = in_channels
332
+ self.out_channels = out_channels
333
+ self.kernel_size = kernel_size
334
+ self.stride = stride
335
+ self.padding = padding
336
+ self.dilation = dilation
337
+ self.transposed = False
338
+ self.output_padding = 0
339
+ self.groups = groups
340
+ self.padding_mode = padding_mode
341
+
342
+ self.weight = None
343
+ self.bias = None
344
+ self.up = None
345
+ self.down = None
346
+
347
+
348
+ def forward(self, input):
349
+ weight, bias = comfy.ops.cast_bias_weight(self, input)
350
+ if self.up is not None:
351
+ return torch.nn.functional.conv2d(input, weight + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), bias, self.stride, self.padding, self.dilation, self.groups)
352
+ else:
353
+ return torch.nn.functional.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups)
354
+
355
+
356
+ class ControlLora(ControlNet):
357
+ def __init__(self, control_weights, global_average_pooling=False, model_options={}): #TODO? model_options
358
+ ControlBase.__init__(self)
359
+ self.control_weights = control_weights
360
+ self.global_average_pooling = global_average_pooling
361
+ self.extra_conds += ["y"]
362
+
363
+ def pre_run(self, model, percent_to_timestep_function):
364
+ super().pre_run(model, percent_to_timestep_function)
365
+ controlnet_config = model.model_config.unet_config.copy()
366
+ controlnet_config.pop("out_channels")
367
+ controlnet_config["hint_channels"] = self.control_weights["input_hint_block.0.weight"].shape[1]
368
+ self.manual_cast_dtype = model.manual_cast_dtype
369
+ dtype = model.get_dtype()
370
+ if self.manual_cast_dtype is None:
371
+ class control_lora_ops(ControlLoraOps, comfy.ops.disable_weight_init):
372
+ pass
373
+ else:
374
+ class control_lora_ops(ControlLoraOps, comfy.ops.manual_cast):
375
+ pass
376
+ dtype = self.manual_cast_dtype
377
+
378
+ controlnet_config["operations"] = control_lora_ops
379
+ controlnet_config["dtype"] = dtype
380
+ self.control_model = comfy.cldm.cldm.ControlNet(**controlnet_config)
381
+ self.control_model.to(comfy.model_management.get_torch_device())
382
+ diffusion_model = model.diffusion_model
383
+ sd = diffusion_model.state_dict()
384
+
385
+ for k in sd:
386
+ weight = sd[k]
387
+ try:
388
+ comfy.utils.set_attr_param(self.control_model, k, weight)
389
+ except Exception:
390
+ pass
391
+
392
+ for k in self.control_weights:
393
+ if k not in {"lora_controlnet"}:
394
+ comfy.utils.set_attr_param(self.control_model, k, self.control_weights[k].to(dtype).to(comfy.model_management.get_torch_device()))
395
+
396
+ def copy(self):
397
+ c = ControlLora(self.control_weights, global_average_pooling=self.global_average_pooling)
398
+ self.copy_to(c)
399
+ return c
400
+
401
+ def cleanup(self):
402
+ del self.control_model
403
+ self.control_model = None
404
+ super().cleanup()
405
+
406
+ def get_models(self):
407
+ out = ControlBase.get_models(self)
408
+ return out
409
+
410
+ def inference_memory_requirements(self, dtype):
411
+ return comfy.utils.calculate_parameters(self.control_weights) * comfy.model_management.dtype_size(dtype) + ControlBase.inference_memory_requirements(self, dtype)
412
+
413
+ def controlnet_config(sd, model_options={}):
414
+ model_config = comfy.model_detection.model_config_from_unet(sd, "", True)
415
+
416
+ unet_dtype = model_options.get("dtype", None)
417
+ if unet_dtype is None:
418
+ weight_dtype = comfy.utils.weight_dtype(sd)
419
+
420
+ supported_inference_dtypes = list(model_config.supported_inference_dtypes)
421
+ if weight_dtype is not None:
422
+ supported_inference_dtypes.append(weight_dtype)
423
+
424
+ unet_dtype = comfy.model_management.unet_dtype(model_params=-1, supported_dtypes=supported_inference_dtypes)
425
+
426
+ load_device = comfy.model_management.get_torch_device()
427
+ manual_cast_dtype = comfy.model_management.unet_manual_cast(unet_dtype, load_device)
428
+
429
+ operations = model_options.get("custom_operations", None)
430
+ if operations is None:
431
+ operations = comfy.ops.pick_operations(unet_dtype, manual_cast_dtype, disable_fast_fp8=True)
432
+
433
+ offload_device = comfy.model_management.unet_offload_device()
434
+ return model_config, operations, load_device, unet_dtype, manual_cast_dtype, offload_device
435
+
436
+ def controlnet_load_state_dict(control_model, sd):
437
+ missing, unexpected = control_model.load_state_dict(sd, strict=False)
438
+
439
+ if len(missing) > 0:
440
+ logging.warning("missing controlnet keys: {}".format(missing))
441
+
442
+ if len(unexpected) > 0:
443
+ logging.debug("unexpected controlnet keys: {}".format(unexpected))
444
+ return control_model
445
+
446
+
447
+ def load_controlnet_mmdit(sd, model_options={}):
448
+ new_sd = comfy.model_detection.convert_diffusers_mmdit(sd, "")
449
+ model_config, operations, load_device, unet_dtype, manual_cast_dtype, offload_device = controlnet_config(new_sd, model_options=model_options)
450
+ num_blocks = comfy.model_detection.count_blocks(new_sd, 'joint_blocks.{}.')
451
+ for k in sd:
452
+ new_sd[k] = sd[k]
453
+
454
+ concat_mask = False
455
+ control_latent_channels = new_sd.get("pos_embed_input.proj.weight").shape[1]
456
+ if control_latent_channels == 17: #inpaint controlnet
457
+ concat_mask = True
458
+
459
+ control_model = comfy.cldm.mmdit.ControlNet(num_blocks=num_blocks, control_latent_channels=control_latent_channels, operations=operations, device=offload_device, dtype=unet_dtype, **model_config.unet_config)
460
+ control_model = controlnet_load_state_dict(control_model, new_sd)
461
+
462
+ latent_format = comfy.latent_formats.SD3()
463
+ latent_format.shift_factor = 0 #SD3 controlnet weirdness
464
+ control = ControlNet(control_model, compression_ratio=1, latent_format=latent_format, concat_mask=concat_mask, load_device=load_device, manual_cast_dtype=manual_cast_dtype)
465
+ return control
466
+
467
+
468
+ class ControlNetSD35(ControlNet):
469
+ def pre_run(self, model, percent_to_timestep_function):
470
+ if self.control_model.double_y_emb:
471
+ missing, unexpected = self.control_model.orig_y_embedder.load_state_dict(model.diffusion_model.y_embedder.state_dict(), strict=False)
472
+ else:
473
+ missing, unexpected = self.control_model.x_embedder.load_state_dict(model.diffusion_model.x_embedder.state_dict(), strict=False)
474
+ super().pre_run(model, percent_to_timestep_function)
475
+
476
+ def copy(self):
477
+ c = ControlNetSD35(None, global_average_pooling=self.global_average_pooling, load_device=self.load_device, manual_cast_dtype=self.manual_cast_dtype)
478
+ c.control_model = self.control_model
479
+ c.control_model_wrapped = self.control_model_wrapped
480
+ self.copy_to(c)
481
+ return c
482
+
483
+ def load_controlnet_sd35(sd, model_options={}):
484
+ control_type = -1
485
+ if "control_type" in sd:
486
+ control_type = round(sd.pop("control_type").item())
487
+
488
+ # blur_cnet = control_type == 0
489
+ canny_cnet = control_type == 1
490
+ depth_cnet = control_type == 2
491
+
492
+ new_sd = {}
493
+ for k in comfy.utils.MMDIT_MAP_BASIC:
494
+ if k[1] in sd:
495
+ new_sd[k[0]] = sd.pop(k[1])
496
+ for k in sd:
497
+ new_sd[k] = sd[k]
498
+ sd = new_sd
499
+
500
+ y_emb_shape = sd["y_embedder.mlp.0.weight"].shape
501
+ depth = y_emb_shape[0] // 64
502
+ hidden_size = 64 * depth
503
+ num_heads = depth
504
+ head_dim = hidden_size // num_heads
505
+ num_blocks = comfy.model_detection.count_blocks(new_sd, 'transformer_blocks.{}.')
506
+
507
+ load_device = comfy.model_management.get_torch_device()
508
+ offload_device = comfy.model_management.unet_offload_device()
509
+ unet_dtype = comfy.model_management.unet_dtype(model_params=-1)
510
+
511
+ manual_cast_dtype = comfy.model_management.unet_manual_cast(unet_dtype, load_device)
512
+
513
+ operations = model_options.get("custom_operations", None)
514
+ if operations is None:
515
+ operations = comfy.ops.pick_operations(unet_dtype, manual_cast_dtype, disable_fast_fp8=True)
516
+
517
+ control_model = comfy.cldm.dit_embedder.ControlNetEmbedder(img_size=None,
518
+ patch_size=2,
519
+ in_chans=16,
520
+ num_layers=num_blocks,
521
+ main_model_double=depth,
522
+ double_y_emb=y_emb_shape[0] == y_emb_shape[1],
523
+ attention_head_dim=head_dim,
524
+ num_attention_heads=num_heads,
525
+ adm_in_channels=2048,
526
+ device=offload_device,
527
+ dtype=unet_dtype,
528
+ operations=operations)
529
+
530
+ control_model = controlnet_load_state_dict(control_model, sd)
531
+
532
+ latent_format = comfy.latent_formats.SD3()
533
+ preprocess_image = lambda a: a
534
+ if canny_cnet:
535
+ preprocess_image = lambda a: (a * 255 * 0.5 + 0.5)
536
+ elif depth_cnet:
537
+ preprocess_image = lambda a: 1.0 - a
538
+
539
+ control = ControlNetSD35(control_model, compression_ratio=1, latent_format=latent_format, load_device=load_device, manual_cast_dtype=manual_cast_dtype, preprocess_image=preprocess_image)
540
+ return control
541
+
542
+
543
+
544
+ def load_controlnet_hunyuandit(controlnet_data, model_options={}):
545
+ model_config, operations, load_device, unet_dtype, manual_cast_dtype, offload_device = controlnet_config(controlnet_data, model_options=model_options)
546
+
547
+ control_model = comfy.ldm.hydit.controlnet.HunYuanControlNet(operations=operations, device=offload_device, dtype=unet_dtype)
548
+ control_model = controlnet_load_state_dict(control_model, controlnet_data)
549
+
550
+ latent_format = comfy.latent_formats.SDXL()
551
+ extra_conds = ['text_embedding_mask', 'encoder_hidden_states_t5', 'text_embedding_mask_t5', 'image_meta_size', 'style', 'cos_cis_img', 'sin_cis_img']
552
+ control = ControlNet(control_model, compression_ratio=1, latent_format=latent_format, load_device=load_device, manual_cast_dtype=manual_cast_dtype, extra_conds=extra_conds, strength_type=StrengthType.CONSTANT)
553
+ return control
554
+
555
+ def load_controlnet_flux_xlabs_mistoline(sd, mistoline=False, model_options={}):
556
+ model_config, operations, load_device, unet_dtype, manual_cast_dtype, offload_device = controlnet_config(sd, model_options=model_options)
557
+ control_model = comfy.ldm.flux.controlnet.ControlNetFlux(mistoline=mistoline, operations=operations, device=offload_device, dtype=unet_dtype, **model_config.unet_config)
558
+ control_model = controlnet_load_state_dict(control_model, sd)
559
+ extra_conds = ['y', 'guidance']
560
+ control = ControlNet(control_model, load_device=load_device, manual_cast_dtype=manual_cast_dtype, extra_conds=extra_conds)
561
+ return control
562
+
563
+ def load_controlnet_flux_instantx(sd, model_options={}):
564
+ new_sd = comfy.model_detection.convert_diffusers_mmdit(sd, "")
565
+ model_config, operations, load_device, unet_dtype, manual_cast_dtype, offload_device = controlnet_config(new_sd, model_options=model_options)
566
+ for k in sd:
567
+ new_sd[k] = sd[k]
568
+
569
+ num_union_modes = 0
570
+ union_cnet = "controlnet_mode_embedder.weight"
571
+ if union_cnet in new_sd:
572
+ num_union_modes = new_sd[union_cnet].shape[0]
573
+
574
+ control_latent_channels = new_sd.get("pos_embed_input.weight").shape[1] // 4
575
+ concat_mask = False
576
+ if control_latent_channels == 17:
577
+ concat_mask = True
578
+
579
+ control_model = comfy.ldm.flux.controlnet.ControlNetFlux(latent_input=True, num_union_modes=num_union_modes, control_latent_channels=control_latent_channels, operations=operations, device=offload_device, dtype=unet_dtype, **model_config.unet_config)
580
+ control_model = controlnet_load_state_dict(control_model, new_sd)
581
+
582
+ latent_format = comfy.latent_formats.Flux()
583
+ extra_conds = ['y', 'guidance']
584
+ control = ControlNet(control_model, compression_ratio=1, latent_format=latent_format, concat_mask=concat_mask, load_device=load_device, manual_cast_dtype=manual_cast_dtype, extra_conds=extra_conds)
585
+ return control
586
+
587
+ def convert_mistoline(sd):
588
+ return comfy.utils.state_dict_prefix_replace(sd, {"single_controlnet_blocks.": "controlnet_single_blocks."})
589
+
590
+
591
+ def load_controlnet_state_dict(state_dict, model=None, model_options={}):
592
+ controlnet_data = state_dict
593
+ if 'after_proj_list.18.bias' in controlnet_data.keys(): #Hunyuan DiT
594
+ return load_controlnet_hunyuandit(controlnet_data, model_options=model_options)
595
+
596
+ if "lora_controlnet" in controlnet_data:
597
+ return ControlLora(controlnet_data, model_options=model_options)
598
+
599
+ controlnet_config = None
600
+ supported_inference_dtypes = None
601
+
602
+ if "controlnet_cond_embedding.conv_in.weight" in controlnet_data: #diffusers format
603
+ controlnet_config = comfy.model_detection.unet_config_from_diffusers_unet(controlnet_data)
604
+ diffusers_keys = comfy.utils.unet_to_diffusers(controlnet_config)
605
+ diffusers_keys["controlnet_mid_block.weight"] = "middle_block_out.0.weight"
606
+ diffusers_keys["controlnet_mid_block.bias"] = "middle_block_out.0.bias"
607
+
608
+ count = 0
609
+ loop = True
610
+ while loop:
611
+ suffix = [".weight", ".bias"]
612
+ for s in suffix:
613
+ k_in = "controlnet_down_blocks.{}{}".format(count, s)
614
+ k_out = "zero_convs.{}.0{}".format(count, s)
615
+ if k_in not in controlnet_data:
616
+ loop = False
617
+ break
618
+ diffusers_keys[k_in] = k_out
619
+ count += 1
620
+
621
+ count = 0
622
+ loop = True
623
+ while loop:
624
+ suffix = [".weight", ".bias"]
625
+ for s in suffix:
626
+ if count == 0:
627
+ k_in = "controlnet_cond_embedding.conv_in{}".format(s)
628
+ else:
629
+ k_in = "controlnet_cond_embedding.blocks.{}{}".format(count - 1, s)
630
+ k_out = "input_hint_block.{}{}".format(count * 2, s)
631
+ if k_in not in controlnet_data:
632
+ k_in = "controlnet_cond_embedding.conv_out{}".format(s)
633
+ loop = False
634
+ diffusers_keys[k_in] = k_out
635
+ count += 1
636
+
637
+ new_sd = {}
638
+ for k in diffusers_keys:
639
+ if k in controlnet_data:
640
+ new_sd[diffusers_keys[k]] = controlnet_data.pop(k)
641
+
642
+ if "control_add_embedding.linear_1.bias" in controlnet_data: #Union Controlnet
643
+ controlnet_config["union_controlnet_num_control_type"] = controlnet_data["task_embedding"].shape[0]
644
+ for k in list(controlnet_data.keys()):
645
+ new_k = k.replace('.attn.in_proj_', '.attn.in_proj.')
646
+ new_sd[new_k] = controlnet_data.pop(k)
647
+
648
+ leftover_keys = controlnet_data.keys()
649
+ if len(leftover_keys) > 0:
650
+ logging.warning("leftover keys: {}".format(leftover_keys))
651
+ controlnet_data = new_sd
652
+ elif "controlnet_blocks.0.weight" in controlnet_data:
653
+ if "double_blocks.0.img_attn.norm.key_norm.scale" in controlnet_data:
654
+ return load_controlnet_flux_xlabs_mistoline(controlnet_data, model_options=model_options)
655
+ elif "pos_embed_input.proj.weight" in controlnet_data:
656
+ if "transformer_blocks.0.adaLN_modulation.1.bias" in controlnet_data:
657
+ return load_controlnet_sd35(controlnet_data, model_options=model_options) #Stability sd3.5 format
658
+ else:
659
+ return load_controlnet_mmdit(controlnet_data, model_options=model_options) #SD3 diffusers controlnet
660
+ elif "controlnet_x_embedder.weight" in controlnet_data:
661
+ return load_controlnet_flux_instantx(controlnet_data, model_options=model_options)
662
+ elif "controlnet_blocks.0.linear.weight" in controlnet_data: #mistoline flux
663
+ return load_controlnet_flux_xlabs_mistoline(convert_mistoline(controlnet_data), mistoline=True, model_options=model_options)
664
+
665
+ pth_key = 'control_model.zero_convs.0.0.weight'
666
+ pth = False
667
+ key = 'zero_convs.0.0.weight'
668
+ if pth_key in controlnet_data:
669
+ pth = True
670
+ key = pth_key
671
+ prefix = "control_model."
672
+ elif key in controlnet_data:
673
+ prefix = ""
674
+ else:
675
+ net = load_t2i_adapter(controlnet_data, model_options=model_options)
676
+ if net is None:
677
+ logging.error("error could not detect control model type.")
678
+ return net
679
+
680
+ if controlnet_config is None:
681
+ model_config = comfy.model_detection.model_config_from_unet(controlnet_data, prefix, True)
682
+ supported_inference_dtypes = list(model_config.supported_inference_dtypes)
683
+ controlnet_config = model_config.unet_config
684
+
685
+ unet_dtype = model_options.get("dtype", None)
686
+ if unet_dtype is None:
687
+ weight_dtype = comfy.utils.weight_dtype(controlnet_data)
688
+
689
+ if supported_inference_dtypes is None:
690
+ supported_inference_dtypes = [comfy.model_management.unet_dtype()]
691
+
692
+ if weight_dtype is not None:
693
+ supported_inference_dtypes.append(weight_dtype)
694
+
695
+ unet_dtype = comfy.model_management.unet_dtype(model_params=-1, supported_dtypes=supported_inference_dtypes)
696
+
697
+ load_device = comfy.model_management.get_torch_device()
698
+
699
+ manual_cast_dtype = comfy.model_management.unet_manual_cast(unet_dtype, load_device)
700
+ operations = model_options.get("custom_operations", None)
701
+ if operations is None:
702
+ operations = comfy.ops.pick_operations(unet_dtype, manual_cast_dtype)
703
+
704
+ controlnet_config["operations"] = operations
705
+ controlnet_config["dtype"] = unet_dtype
706
+ controlnet_config["device"] = comfy.model_management.unet_offload_device()
707
+ controlnet_config.pop("out_channels")
708
+ controlnet_config["hint_channels"] = controlnet_data["{}input_hint_block.0.weight".format(prefix)].shape[1]
709
+ control_model = comfy.cldm.cldm.ControlNet(**controlnet_config)
710
+
711
+ if pth:
712
+ if 'difference' in controlnet_data:
713
+ if model is not None:
714
+ comfy.model_management.load_models_gpu([model])
715
+ model_sd = model.model_state_dict()
716
+ for x in controlnet_data:
717
+ c_m = "control_model."
718
+ if x.startswith(c_m):
719
+ sd_key = "diffusion_model.{}".format(x[len(c_m):])
720
+ if sd_key in model_sd:
721
+ cd = controlnet_data[x]
722
+ cd += model_sd[sd_key].type(cd.dtype).to(cd.device)
723
+ else:
724
+ logging.warning("WARNING: Loaded a diff controlnet without a model. It will very likely not work.")
725
+
726
+ class WeightsLoader(torch.nn.Module):
727
+ pass
728
+ w = WeightsLoader()
729
+ w.control_model = control_model
730
+ missing, unexpected = w.load_state_dict(controlnet_data, strict=False)
731
+ else:
732
+ missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False)
733
+
734
+ if len(missing) > 0:
735
+ logging.warning("missing controlnet keys: {}".format(missing))
736
+
737
+ if len(unexpected) > 0:
738
+ logging.debug("unexpected controlnet keys: {}".format(unexpected))
739
+
740
+ global_average_pooling = model_options.get("global_average_pooling", False)
741
+ control = ControlNet(control_model, global_average_pooling=global_average_pooling, load_device=load_device, manual_cast_dtype=manual_cast_dtype)
742
+ return control
743
+
744
+ def load_controlnet(ckpt_path, model=None, model_options={}):
745
+ if "global_average_pooling" not in model_options:
746
+ filename = os.path.splitext(ckpt_path)[0]
747
+ if filename.endswith("_shuffle") or filename.endswith("_shuffle_fp16"): #TODO: smarter way of enabling global_average_pooling
748
+ model_options["global_average_pooling"] = True
749
+
750
+ cnet = load_controlnet_state_dict(comfy.utils.load_torch_file(ckpt_path, safe_load=True), model=model, model_options=model_options)
751
+ if cnet is None:
752
+ logging.error("error checkpoint does not contain controlnet or t2i adapter data {}".format(ckpt_path))
753
+ return cnet
754
+
755
+ class T2IAdapter(ControlBase):
756
+ def __init__(self, t2i_model, channels_in, compression_ratio, upscale_algorithm, device=None):
757
+ super().__init__()
758
+ self.t2i_model = t2i_model
759
+ self.channels_in = channels_in
760
+ self.control_input = None
761
+ self.compression_ratio = compression_ratio
762
+ self.upscale_algorithm = upscale_algorithm
763
+ if device is None:
764
+ device = comfy.model_management.get_torch_device()
765
+ self.device = device
766
+
767
+ def scale_image_to(self, width, height):
768
+ unshuffle_amount = self.t2i_model.unshuffle_amount
769
+ width = math.ceil(width / unshuffle_amount) * unshuffle_amount
770
+ height = math.ceil(height / unshuffle_amount) * unshuffle_amount
771
+ return width, height
772
+
773
+ def get_control(self, x_noisy, t, cond, batched_number, transformer_options):
774
+ control_prev = None
775
+ if self.previous_controlnet is not None:
776
+ control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number, transformer_options)
777
+
778
+ if self.timestep_range is not None:
779
+ if t[0] > self.timestep_range[0] or t[0] < self.timestep_range[1]:
780
+ if control_prev is not None:
781
+ return control_prev
782
+ else:
783
+ return None
784
+
785
+ if self.cond_hint is None or x_noisy.shape[2] * self.compression_ratio != self.cond_hint.shape[2] or x_noisy.shape[3] * self.compression_ratio != self.cond_hint.shape[3]:
786
+ if self.cond_hint is not None:
787
+ del self.cond_hint
788
+ self.control_input = None
789
+ self.cond_hint = None
790
+ width, height = self.scale_image_to(x_noisy.shape[3] * self.compression_ratio, x_noisy.shape[2] * self.compression_ratio)
791
+ self.cond_hint = comfy.utils.common_upscale(self.cond_hint_original, width, height, self.upscale_algorithm, "center").float().to(self.device)
792
+ if self.channels_in == 1 and self.cond_hint.shape[1] > 1:
793
+ self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True)
794
+ if x_noisy.shape[0] != self.cond_hint.shape[0]:
795
+ self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number)
796
+ if self.control_input is None:
797
+ self.t2i_model.to(x_noisy.dtype)
798
+ self.t2i_model.to(self.device)
799
+ self.control_input = self.t2i_model(self.cond_hint.to(x_noisy.dtype))
800
+ self.t2i_model.cpu()
801
+
802
+ control_input = {}
803
+ for k in self.control_input:
804
+ control_input[k] = list(map(lambda a: None if a is None else a.clone(), self.control_input[k]))
805
+
806
+ return self.control_merge(control_input, control_prev, x_noisy.dtype)
807
+
808
+ def copy(self):
809
+ c = T2IAdapter(self.t2i_model, self.channels_in, self.compression_ratio, self.upscale_algorithm)
810
+ self.copy_to(c)
811
+ return c
812
+
813
+ def load_t2i_adapter(t2i_data, model_options={}): #TODO: model_options
814
+ compression_ratio = 8
815
+ upscale_algorithm = 'nearest-exact'
816
+
817
+ if 'adapter' in t2i_data:
818
+ t2i_data = t2i_data['adapter']
819
+ if 'adapter.body.0.resnets.0.block1.weight' in t2i_data: #diffusers format
820
+ prefix_replace = {}
821
+ for i in range(4):
822
+ for j in range(2):
823
+ prefix_replace["adapter.body.{}.resnets.{}.".format(i, j)] = "body.{}.".format(i * 2 + j)
824
+ prefix_replace["adapter.body.{}.".format(i, )] = "body.{}.".format(i * 2)
825
+ prefix_replace["adapter."] = ""
826
+ t2i_data = comfy.utils.state_dict_prefix_replace(t2i_data, prefix_replace)
827
+ keys = t2i_data.keys()
828
+
829
+ if "body.0.in_conv.weight" in keys:
830
+ cin = t2i_data['body.0.in_conv.weight'].shape[1]
831
+ model_ad = comfy.t2i_adapter.adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
832
+ elif 'conv_in.weight' in keys:
833
+ cin = t2i_data['conv_in.weight'].shape[1]
834
+ channel = t2i_data['conv_in.weight'].shape[0]
835
+ ksize = t2i_data['body.0.block2.weight'].shape[2]
836
+ use_conv = False
837
+ down_opts = list(filter(lambda a: a.endswith("down_opt.op.weight"), keys))
838
+ if len(down_opts) > 0:
839
+ use_conv = True
840
+ xl = False
841
+ if cin == 256 or cin == 768:
842
+ xl = True
843
+ model_ad = comfy.t2i_adapter.adapter.Adapter(cin=cin, channels=[channel, channel*2, channel*4, channel*4][:4], nums_rb=2, ksize=ksize, sk=True, use_conv=use_conv, xl=xl)
844
+ elif "backbone.0.0.weight" in keys:
845
+ model_ad = comfy.ldm.cascade.controlnet.ControlNet(c_in=t2i_data['backbone.0.0.weight'].shape[1], proj_blocks=[0, 4, 8, 12, 51, 55, 59, 63])
846
+ compression_ratio = 32
847
+ upscale_algorithm = 'bilinear'
848
+ elif "backbone.10.blocks.0.weight" in keys:
849
+ model_ad = comfy.ldm.cascade.controlnet.ControlNet(c_in=t2i_data['backbone.0.weight'].shape[1], bottleneck_mode="large", proj_blocks=[0, 4, 8, 12, 51, 55, 59, 63])
850
+ compression_ratio = 1
851
+ upscale_algorithm = 'nearest-exact'
852
+ else:
853
+ return None
854
+
855
+ missing, unexpected = model_ad.load_state_dict(t2i_data)
856
+ if len(missing) > 0:
857
+ logging.warning("t2i missing {}".format(missing))
858
+
859
+ if len(unexpected) > 0:
860
+ logging.debug("t2i unexpected {}".format(unexpected))
861
+
862
+ return T2IAdapter(model_ad, model_ad.input_channels, compression_ratio, upscale_algorithm)
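As a concrete anchor for `broadcast_image_to` near the top of this file: a single-image hint is returned as-is, while a batch of hints is tiled (and truncated) to cover `target_batch_size` across the cond/uncond groups. A quick sketch:

```python
import torch
from comfy.controlnet import broadcast_image_to

hints = torch.zeros(2, 3, 64, 64)      # two hint images
out = broadcast_image_to(hints, 8, 2)  # 8 latents, 2 cond/uncond groups
print(out.shape)                       # torch.Size([8, 3, 64, 64])
```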
comfy/diffusers_convert.py ADDED
@@ -0,0 +1,288 @@
1
+ import re
2
+ import torch
3
+ import logging
4
+
5
+ # conversion code from https://github.com/huggingface/diffusers/blob/main/scripts/convert_diffusers_to_original_stable_diffusion.py
6
+
7
+ # =================#
8
+ # UNet Conversion #
9
+ # =================#
10
+
11
+ unet_conversion_map = [
12
+ # (stable-diffusion, HF Diffusers)
13
+ ("time_embed.0.weight", "time_embedding.linear_1.weight"),
14
+ ("time_embed.0.bias", "time_embedding.linear_1.bias"),
15
+ ("time_embed.2.weight", "time_embedding.linear_2.weight"),
16
+ ("time_embed.2.bias", "time_embedding.linear_2.bias"),
17
+ ("input_blocks.0.0.weight", "conv_in.weight"),
18
+ ("input_blocks.0.0.bias", "conv_in.bias"),
19
+ ("out.0.weight", "conv_norm_out.weight"),
20
+ ("out.0.bias", "conv_norm_out.bias"),
21
+ ("out.2.weight", "conv_out.weight"),
22
+ ("out.2.bias", "conv_out.bias"),
23
+ ]
24
+
25
+ unet_conversion_map_resnet = [
26
+ # (stable-diffusion, HF Diffusers)
27
+ ("in_layers.0", "norm1"),
28
+ ("in_layers.2", "conv1"),
29
+ ("out_layers.0", "norm2"),
30
+ ("out_layers.3", "conv2"),
31
+ ("emb_layers.1", "time_emb_proj"),
32
+ ("skip_connection", "conv_shortcut"),
33
+ ]
34
+
35
+ unet_conversion_map_layer = []
36
+ # hardcoded number of downblocks and resnets/attentions...
37
+ # would need smarter logic for other networks.
38
+ for i in range(4):
39
+ # loop over downblocks/upblocks
40
+
41
+ for j in range(2):
42
+ # loop over resnets/attentions for downblocks
43
+ hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
44
+ sd_down_res_prefix = f"input_blocks.{3 * i + j + 1}.0."
45
+ unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
46
+
47
+ if i < 3:
48
+ # no attention layers in down_blocks.3
49
+ hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
50
+ sd_down_atn_prefix = f"input_blocks.{3 * i + j + 1}.1."
51
+ unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
52
+
53
+ for j in range(3):
54
+ # loop over resnets/attentions for upblocks
55
+ hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
56
+ sd_up_res_prefix = f"output_blocks.{3 * i + j}.0."
57
+ unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
58
+
59
+ if i > 0:
60
+ # no attention layers in up_blocks.0
61
+ hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
62
+ sd_up_atn_prefix = f"output_blocks.{3 * i + j}.1."
63
+ unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
64
+
65
+ if i < 3:
66
+ # no downsample in down_blocks.3
67
+ hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
68
+ sd_downsample_prefix = f"input_blocks.{3 * (i + 1)}.0.op."
69
+ unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
70
+
71
+ # no upsample in up_blocks.3
72
+ hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
73
+ sd_upsample_prefix = f"output_blocks.{3 * i + 2}.{1 if i == 0 else 2}."
74
+ unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
75
+
76
+ hf_mid_atn_prefix = "mid_block.attentions.0."
77
+ sd_mid_atn_prefix = "middle_block.1."
78
+ unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
79
+
80
+ for j in range(2):
81
+ hf_mid_res_prefix = f"mid_block.resnets.{j}."
82
+ sd_mid_res_prefix = f"middle_block.{2 * j}."
83
+ unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
84
+
85
+
86
+ def convert_unet_state_dict(unet_state_dict):
87
+ # buyer beware: this is a *brittle* function,
88
+ # and correct output requires that all of these pieces interact in
89
+ # the exact order in which I have arranged them.
90
+ mapping = {k: k for k in unet_state_dict.keys()}
91
+ for sd_name, hf_name in unet_conversion_map:
92
+ mapping[hf_name] = sd_name
93
+ for k, v in mapping.items():
94
+ if "resnets" in k:
95
+ for sd_part, hf_part in unet_conversion_map_resnet:
96
+ v = v.replace(hf_part, sd_part)
97
+ mapping[k] = v
98
+ for k, v in mapping.items():
99
+ for sd_part, hf_part in unet_conversion_map_layer:
100
+ v = v.replace(hf_part, sd_part)
101
+ mapping[k] = v
102
+ new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
103
+ return new_state_dict
104
+
105
+
106
+ # ================#
107
+ # VAE Conversion #
108
+ # ================#
109
+
110
+ vae_conversion_map = [
111
+ # (stable-diffusion, HF Diffusers)
112
+ ("nin_shortcut", "conv_shortcut"),
113
+ ("norm_out", "conv_norm_out"),
114
+ ("mid.attn_1.", "mid_block.attentions.0."),
115
+ ]
116
+
117
+ for i in range(4):
118
+ # down_blocks have two resnets
119
+ for j in range(2):
120
+ hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
121
+ sd_down_prefix = f"encoder.down.{i}.block.{j}."
122
+ vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
123
+
124
+ if i < 3:
125
+ hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
126
+ sd_downsample_prefix = f"down.{i}.downsample."
127
+ vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
128
+
129
+ hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
130
+ sd_upsample_prefix = f"up.{3 - i}.upsample."
131
+ vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
132
+
133
+ # up_blocks have three resnets
134
+ # also, up blocks in hf are numbered in reverse from sd
135
+ for j in range(3):
136
+ hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
137
+ sd_up_prefix = f"decoder.up.{3 - i}.block.{j}."
138
+ vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
139
+
140
+ # this part accounts for mid blocks in both the encoder and the decoder
141
+ for i in range(2):
142
+ hf_mid_res_prefix = f"mid_block.resnets.{i}."
143
+ sd_mid_res_prefix = f"mid.block_{i + 1}."
144
+ vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
145
+
146
+ vae_conversion_map_attn = [
147
+ # (stable-diffusion, HF Diffusers)
148
+ ("norm.", "group_norm."),
149
+ ("q.", "query."),
150
+ ("k.", "key."),
151
+ ("v.", "value."),
152
+ ("q.", "to_q."),
153
+ ("k.", "to_k."),
154
+ ("v.", "to_v."),
155
+ ("proj_out.", "to_out.0."),
156
+ ("proj_out.", "proj_attn."),
157
+ ]
158
+
159
+
160
+ def reshape_weight_for_sd(w, conv3d=False):
161
+ # convert HF linear weights to SD conv weights (conv2d, or conv3d for 5-D weights)
162
+ if conv3d:
163
+ return w.reshape(*w.shape, 1, 1, 1)
164
+ else:
165
+ return w.reshape(*w.shape, 1, 1)
166
+
167
+
168
+ def convert_vae_state_dict(vae_state_dict):
169
+ mapping = {k: k for k in vae_state_dict.keys()}
170
+ conv3d = False
171
+ for k, v in mapping.items():
172
+ for sd_part, hf_part in vae_conversion_map:
173
+ v = v.replace(hf_part, sd_part)
174
+ if v.endswith(".conv.weight"):
175
+ if not conv3d and vae_state_dict[k].ndim == 5:
176
+ conv3d = True
177
+ mapping[k] = v
178
+ for k, v in mapping.items():
179
+ if "attentions" in k:
180
+ for sd_part, hf_part in vae_conversion_map_attn:
181
+ v = v.replace(hf_part, sd_part)
182
+ mapping[k] = v
183
+ new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
184
+ weights_to_convert = ["q", "k", "v", "proj_out"]
185
+ for k, v in new_state_dict.items():
186
+ for weight_name in weights_to_convert:
187
+ if f"mid.attn_1.{weight_name}.weight" in k:
188
+ logging.debug(f"Reshaping {k} for SD format")
189
+ new_state_dict[k] = reshape_weight_for_sd(v, conv3d=conv3d)
190
+ return new_state_dict
191
+
192
+
193
+ # =========================#
194
+ # Text Encoder Conversion #
195
+ # =========================#
196
+
197
+
198
+ textenc_conversion_lst = [
199
+ # (stable-diffusion, HF Diffusers)
200
+ ("resblocks.", "text_model.encoder.layers."),
201
+ ("ln_1", "layer_norm1"),
202
+ ("ln_2", "layer_norm2"),
203
+ (".c_fc.", ".fc1."),
204
+ (".c_proj.", ".fc2."),
205
+ (".attn", ".self_attn"),
206
+ ("ln_final.", "transformer.text_model.final_layer_norm."),
207
+ ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
208
+ ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
209
+ ]
210
+ protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
211
+ textenc_pattern = re.compile("|".join(protected.keys()))
212
+
213
+ # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
214
+ code2idx = {"q": 0, "k": 1, "v": 2}
215
+
216
+ # This function exists because at the time of writing torch.cat can't do fp8 with cuda
217
+ def cat_tensors(tensors):
218
+ x = 0
219
+ for t in tensors:
220
+ x += t.shape[0]
221
+
222
+ shape = [x] + list(tensors[0].shape)[1:]
223
+ out = torch.empty(shape, device=tensors[0].device, dtype=tensors[0].dtype)
224
+
225
+ x = 0
226
+ for t in tensors:
227
+ out[x:x + t.shape[0]] = t
228
+ x += t.shape[0]
229
+
230
+ return out
231
+
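# A minimal sanity check (illustrative, not part of the module): for dtypes that
# torch.cat does support, cat_tensors produces the same result along dim 0.
a, b = torch.ones(2, 3), torch.zeros(1, 3)
assert torch.equal(cat_tensors([a, b]), torch.cat([a, b], dim=0))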
232
+ def convert_text_enc_state_dict_v20(text_enc_dict, prefix=""):
233
+ new_state_dict = {}
234
+ capture_qkv_weight = {}
235
+ capture_qkv_bias = {}
236
+ for k, v in text_enc_dict.items():
237
+ if not k.startswith(prefix):
238
+ continue
239
+ if (
240
+ k.endswith(".self_attn.q_proj.weight")
241
+ or k.endswith(".self_attn.k_proj.weight")
242
+ or k.endswith(".self_attn.v_proj.weight")
243
+ ):
244
+ k_pre = k[: -len(".q_proj.weight")]
245
+ k_code = k[-len("q_proj.weight")]
246
+ if k_pre not in capture_qkv_weight:
247
+ capture_qkv_weight[k_pre] = [None, None, None]
248
+ capture_qkv_weight[k_pre][code2idx[k_code]] = v
249
+ continue
250
+
251
+ if (
252
+ k.endswith(".self_attn.q_proj.bias")
253
+ or k.endswith(".self_attn.k_proj.bias")
254
+ or k.endswith(".self_attn.v_proj.bias")
255
+ ):
256
+ k_pre = k[: -len(".q_proj.bias")]
257
+ k_code = k[-len("q_proj.bias")]
258
+ if k_pre not in capture_qkv_bias:
259
+ capture_qkv_bias[k_pre] = [None, None, None]
260
+ capture_qkv_bias[k_pre][code2idx[k_code]] = v
261
+ continue
262
+
263
+ text_proj = "transformer.text_projection.weight"
264
+ if k.endswith(text_proj):
265
+ new_state_dict[k.replace(text_proj, "text_projection")] = v.transpose(0, 1).contiguous()
266
+ else:
267
+ relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
268
+ new_state_dict[relabelled_key] = v
269
+
270
+ for k_pre, tensors in capture_qkv_weight.items():
271
+ if None in tensors:
272
+ raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
273
+ relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
274
+ new_state_dict[relabelled_key + ".in_proj_weight"] = cat_tensors(tensors)
275
+
276
+ for k_pre, tensors in capture_qkv_bias.items():
277
+ if None in tensors:
278
+ raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
279
+ relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
280
+ new_state_dict[relabelled_key + ".in_proj_bias"] = cat_tensors(tensors)
281
+
282
+ return new_state_dict
283
+
284
+
285
+ def convert_text_enc_state_dict(text_enc_dict):
286
+ return text_enc_dict
287
+
288
+
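As a usage sketch, the two converters above can be chained to produce a single-file SD-layout checkpoint. The safetensors loading and the file paths are illustrative assumptions; `model.diffusion_model.` and `first_stage_model.` are the standard SD checkpoint prefixes for the UNet and VAE:

import safetensors.torch

unet_sd = safetensors.torch.load_file("unet/diffusion_pytorch_model.safetensors")  # placeholder path
vae_sd = safetensors.torch.load_file("vae/diffusion_pytorch_model.safetensors")    # placeholder path

sd_unet = convert_unet_state_dict(unet_sd)  # remap HF Diffusers key names to SD key names
sd_vae = convert_vae_state_dict(vae_sd)     # remap keys and reshape the mid-block attention weights

checkpoint = {f"model.diffusion_model.{k}": v for k, v in sd_unet.items()}
checkpoint.update({f"first_stage_model.{k}": v for k, v in sd_vae.items()})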
comfy/diffusers_load.py ADDED
@@ -0,0 +1,36 @@
1
+ import os
2
+
3
+ import comfy.sd
+ import comfy.utils
4
+
5
+ def first_file(path, filenames):
6
+ for f in filenames:
7
+ p = os.path.join(path, f)
8
+ if os.path.exists(p):
9
+ return p
10
+ return None
11
+
12
+ def load_diffusers(model_path, output_vae=True, output_clip=True, embedding_directory=None):
13
+ diffusion_model_names = ["diffusion_pytorch_model.fp16.safetensors", "diffusion_pytorch_model.safetensors", "diffusion_pytorch_model.fp16.bin", "diffusion_pytorch_model.bin"]
14
+ unet_path = first_file(os.path.join(model_path, "unet"), diffusion_model_names)
15
+ vae_path = first_file(os.path.join(model_path, "vae"), diffusion_model_names)
16
+
17
+ text_encoder_model_names = ["model.fp16.safetensors", "model.safetensors", "pytorch_model.fp16.bin", "pytorch_model.bin"]
18
+ text_encoder1_path = first_file(os.path.join(model_path, "text_encoder"), text_encoder_model_names)
19
+ text_encoder2_path = first_file(os.path.join(model_path, "text_encoder_2"), text_encoder_model_names)
20
+
21
+ text_encoder_paths = [text_encoder1_path]
22
+ if text_encoder2_path is not None:
23
+ text_encoder_paths.append(text_encoder2_path)
24
+
25
+ unet = comfy.sd.load_diffusion_model(unet_path)
26
+
27
+ clip = None
28
+ if output_clip:
29
+ clip = comfy.sd.load_clip(text_encoder_paths, embedding_directory=embedding_directory)
30
+
31
+ vae = None
32
+ if output_vae:
33
+ sd = comfy.utils.load_torch_file(vae_path)
34
+ vae = comfy.sd.VAE(sd=sd)
35
+
36
+ return (unet, clip, vae)
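A minimal call sketch, assuming `model_path` points at a diffusers checkpoint folder with `unet/`, `vae/` and `text_encoder/` subfolders (the path below is a placeholder):

unet, clip, vae = load_diffusers(
    "/path/to/diffusers_checkpoint",  # placeholder path
    output_vae=True,
    output_clip=True,
    embedding_directory=None,
)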
comfy/extra_samplers/uni_pc.py ADDED
@@ -0,0 +1,873 @@
1
+ #code taken from: https://github.com/wl-zhao/UniPC and modified
2
+
3
+ import torch
4
+ import math
5
+ import logging
6
+
7
+ from tqdm.auto import trange
8
+
9
+
10
+ class NoiseScheduleVP:
11
+ def __init__(
12
+ self,
13
+ schedule='discrete',
14
+ betas=None,
15
+ alphas_cumprod=None,
16
+ continuous_beta_0=0.1,
17
+ continuous_beta_1=20.,
18
+ ):
19
+ r"""Create a wrapper class for the forward SDE (VP type).
20
+
21
+ ***
22
+ Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
24
+ We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
24
+ ***
25
+
26
+ The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
27
+ We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
28
+ Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
29
+
30
+ log_alpha_t = self.marginal_log_mean_coeff(t)
31
+ sigma_t = self.marginal_std(t)
32
+ lambda_t = self.marginal_lambda(t)
33
+
34
+ Moreover, as lambda(t) is an invertible function, we also support its inverse function:
35
+
36
+ t = self.inverse_lambda(lambda_t)
37
+
38
+ ===============================================================
39
+
40
+ We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
41
+
42
+ 1. For discrete-time DPMs:
43
+
44
+ For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
45
+ t_i = (i + 1) / N
46
+ e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
47
+ We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
48
+
49
+ Args:
50
+ betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
51
+ alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
52
+
53
+ Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
54
+
55
+ **Important**: Please pay special attention to the arg `alphas_cumprod`:
56
+ The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
57
+ q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
58
+ Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
59
+ alpha_{t_n} = \sqrt{\hat{alpha_n}},
60
+ and
61
+ log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
62
+
63
+
64
+ 2. For continuous-time DPMs:
65
+
66
+ We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
67
+ schedule are the default settings in DDPM and improved-DDPM:
68
+
69
+ Args:
70
+ beta_min: A `float` number. The smallest beta for the linear schedule.
71
+ beta_max: A `float` number. The largest beta for the linear schedule.
72
+ cosine_s: A `float` number. The hyperparameter in the cosine schedule.
73
+ cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
74
+ T: A `float` number. The ending time of the forward process.
75
+
76
+ ===============================================================
77
+
78
+ Args:
79
+ schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
80
+ 'linear' or 'cosine' for continuous-time DPMs.
81
+ Returns:
82
+ A wrapper object of the forward SDE (VP type).
83
+
84
+ ===============================================================
85
+
86
+ Example:
87
+
88
+ # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
89
+ >>> ns = NoiseScheduleVP('discrete', betas=betas)
90
+
91
+ # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
92
+ >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
93
+
94
+ # For continuous-time DPMs (VPSDE), linear schedule:
95
+ >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
96
+
97
+ """
98
+
99
+ if schedule not in ['discrete', 'linear', 'cosine']:
100
+ raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule))
101
+
102
+ self.schedule = schedule
103
+ if schedule == 'discrete':
104
+ if betas is not None:
105
+ log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
106
+ else:
107
+ assert alphas_cumprod is not None
108
+ log_alphas = 0.5 * torch.log(alphas_cumprod)
109
+ self.total_N = len(log_alphas)
110
+ self.T = 1.
111
+ self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
112
+ self.log_alpha_array = log_alphas.reshape((1, -1,))
113
+ else:
114
+ self.total_N = 1000
115
+ self.beta_0 = continuous_beta_0
116
+ self.beta_1 = continuous_beta_1
117
+ self.cosine_s = 0.008
118
+ self.cosine_beta_max = 999.
119
+ self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
120
+ self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
121
+ self.schedule = schedule
122
+ if schedule == 'cosine':
123
+ # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
124
+ # Note that T = 0.9946 may not be the optimal setting. However, we find it works well.
125
+ self.T = 0.9946
126
+ else:
127
+ self.T = 1.
128
+
129
+ def marginal_log_mean_coeff(self, t):
130
+ """
131
+ Compute log(alpha_t) of a given continuous-time label t in [0, T].
132
+ """
133
+ if self.schedule == 'discrete':
134
+ return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1))
135
+ elif self.schedule == 'linear':
136
+ return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
137
+ elif self.schedule == 'cosine':
138
+ log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
139
+ log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
140
+ return log_alpha_t
141
+
142
+ def marginal_alpha(self, t):
143
+ """
144
+ Compute alpha_t of a given continuous-time label t in [0, T].
145
+ """
146
+ return torch.exp(self.marginal_log_mean_coeff(t))
147
+
148
+ def marginal_std(self, t):
149
+ """
150
+ Compute sigma_t of a given continuous-time label t in [0, T].
151
+ """
152
+ return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
153
+
154
+ def marginal_lambda(self, t):
155
+ """
156
+ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
157
+ """
158
+ log_mean_coeff = self.marginal_log_mean_coeff(t)
159
+ log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
160
+ return log_mean_coeff - log_std
161
+
162
+ def inverse_lambda(self, lamb):
163
+ """
164
+ Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
165
+ """
166
+ if self.schedule == 'linear':
167
+ tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
168
+ Delta = self.beta_0**2 + tmp
169
+ return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
170
+ elif self.schedule == 'discrete':
171
+ log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
172
+ t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1]))
173
+ return t.reshape((-1,))
174
+ else:
175
+ log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
176
+ t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
177
+ t = t_fn(log_alpha)
178
+ return t
179
+
180
+
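# A quick sanity check of the identities above: for a VP schedule,
# alpha_t^2 + sigma_t^2 = 1, and inverse_lambda inverts marginal_lambda.
# The linear beta schedule below is illustrative.
betas = torch.linspace(1e-4, 2e-2, 1000)
ns = NoiseScheduleVP('discrete', betas=betas)
t = torch.tensor([0.5])
alpha_t, sigma_t = ns.marginal_alpha(t), ns.marginal_std(t)
assert torch.allclose(alpha_t ** 2 + sigma_t ** 2, torch.ones_like(t))
assert torch.allclose(ns.inverse_lambda(ns.marginal_lambda(t)), t, atol=1e-4)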
181
+ def model_wrapper(
182
+ model,
183
+ noise_schedule,
184
+ model_type="noise",
185
+ model_kwargs={},
186
+ guidance_type="uncond",
187
+ condition=None,
188
+ unconditional_condition=None,
189
+ guidance_scale=1.,
190
+ classifier_fn=None,
191
+ classifier_kwargs={},
192
+ ):
193
+ """Create a wrapper function for the noise prediction model.
194
+
195
+ DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
196
+ first wrap the model function into a noise prediction model that accepts the continuous time as input.
197
+
198
+ We support four types of the diffusion model by setting `model_type`:
199
+
200
+ 1. "noise": noise prediction model. (Trained by predicting noise).
201
+
202
+ 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
203
+
204
+ 3. "v": velocity prediction model. (Trained by predicting the velocity).
205
+ The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].
206
+
207
+ [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
208
+ arXiv preprint arXiv:2202.00512 (2022).
209
+ [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
210
+ arXiv preprint arXiv:2210.02303 (2022).
211
+
212
+ 4. "score": marginal score function. (Trained by denoising score matching).
213
+ Note that the score function and the noise prediction model follow a simple relationship:
214
+ ```
215
+ noise(x_t, t) = -sigma_t * score(x_t, t)
216
+ ```
217
+
218
+ We support three types of guided sampling by DPMs by setting `guidance_type`:
219
+ 1. "uncond": unconditional sampling by DPMs.
220
+ The input `model` has the following format:
221
+ ``
222
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
223
+ ``
224
+
225
+ 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
226
+ The input `model` has the following format:
227
+ ``
228
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
229
+ ``
230
+
231
+ The input `classifier_fn` has the following format:
232
+ ``
233
+ classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
234
+ ``
235
+
236
+ [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
237
+ in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
238
+
239
+ 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
240
+ The input `model` has the following format:
241
+ ``
242
+ model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
243
+ ``
244
+ And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
245
+
246
+ [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
247
+ arXiv preprint arXiv:2207.12598 (2022).
248
+
249
+
250
+ The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
251
+ or continuous-time labels (i.e. epsilon to T).
252
+
253
+ We wrap the model function to accept only `x` and `t_continuous` as inputs, and output the predicted noise:
254
+ ``
255
+ def model_fn(x, t_continuous) -> noise:
256
+ t_input = get_model_input_time(t_continuous)
257
+ return noise_pred(model, x, t_input, **model_kwargs)
258
+ ``
259
+ where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
260
+
261
+ ===============================================================
262
+
263
+ Args:
264
+ model: A diffusion model with the corresponding format described above.
265
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
266
+ model_type: A `str`. The parameterization type of the diffusion model.
267
+ "noise" or "x_start" or "v" or "score".
268
+ model_kwargs: A `dict`. A dict for the other inputs of the model function.
269
+ guidance_type: A `str`. The type of the guidance for sampling.
270
+ "uncond" or "classifier" or "classifier-free".
271
+ condition: A pytorch tensor. The condition for the guided sampling.
272
+ Only used for "classifier" or "classifier-free" guidance type.
273
+ unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
274
+ Only used for "classifier-free" guidance type.
275
+ guidance_scale: A `float`. The scale for the guided sampling.
276
+ classifier_fn: A classifier function. Only used for the classifier guidance.
277
+ classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
278
+ Returns:
279
+ A noise prediction model that accepts the noised data and the continuous time as the inputs.
280
+ """
281
+
282
+ def get_model_input_time(t_continuous):
283
+ """
284
+ Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
285
+ For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
286
+ For continuous-time DPMs, we just use `t_continuous`.
287
+ """
288
+ if noise_schedule.schedule == 'discrete':
289
+ return (t_continuous - 1. / noise_schedule.total_N) * 1000.
290
+ else:
291
+ return t_continuous
292
+
293
+ def noise_pred_fn(x, t_continuous, cond=None):
294
+ if t_continuous.reshape((-1,)).shape[0] == 1:
295
+ t_continuous = t_continuous.expand((x.shape[0]))
296
+ t_input = get_model_input_time(t_continuous)
297
+ output = model(x, t_input, **model_kwargs)
298
+ if model_type == "noise":
299
+ return output
300
+ elif model_type == "x_start":
301
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
302
+ dims = x.dim()
303
+ return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
304
+ elif model_type == "v":
305
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
306
+ dims = x.dim()
307
+ return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
308
+ elif model_type == "score":
309
+ sigma_t = noise_schedule.marginal_std(t_continuous)
310
+ dims = x.dim()
311
+ return -expand_dims(sigma_t, dims) * output
312
+
313
+ def cond_grad_fn(x, t_input):
314
+ """
315
+ Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
316
+ """
317
+ with torch.enable_grad():
318
+ x_in = x.detach().requires_grad_(True)
319
+ log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
320
+ return torch.autograd.grad(log_prob.sum(), x_in)[0]
321
+
322
+ def model_fn(x, t_continuous):
323
+ """
324
+ The noise prediction model function that is used for DPM-Solver.
325
+ """
326
+ if t_continuous.reshape((-1,)).shape[0] == 1:
327
+ t_continuous = t_continuous.expand((x.shape[0]))
328
+ if guidance_type == "uncond":
329
+ return noise_pred_fn(x, t_continuous)
330
+ elif guidance_type == "classifier":
331
+ assert classifier_fn is not None
332
+ t_input = get_model_input_time(t_continuous)
333
+ cond_grad = cond_grad_fn(x, t_input)
334
+ sigma_t = noise_schedule.marginal_std(t_continuous)
335
+ noise = noise_pred_fn(x, t_continuous)
336
+ return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
337
+ elif guidance_type == "classifier-free":
338
+ if guidance_scale == 1. or unconditional_condition is None:
339
+ return noise_pred_fn(x, t_continuous, cond=condition)
340
+ else:
341
+ x_in = torch.cat([x] * 2)
342
+ t_in = torch.cat([t_continuous] * 2)
343
+ c_in = torch.cat([unconditional_condition, condition])
344
+ noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
345
+ return noise_uncond + guidance_scale * (noise - noise_uncond)
346
+
347
+ assert model_type in ["noise", "x_start", "v"]
348
+ assert guidance_type in ["uncond", "classifier", "classifier-free"]
349
+ return model_fn
350
+
351
+
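# For instance, wrapping an unconditional noise-prediction network
# (the zero-returning eps_model below is an illustrative stand-in for a real UNet):
eps_model = lambda x, t, **kwargs: torch.zeros_like(x)
ns = NoiseScheduleVP('discrete', betas=torch.linspace(1e-4, 2e-2, 1000))
model_fn = model_wrapper(eps_model, ns, model_type="noise", guidance_type="uncond")
eps = model_fn(torch.randn(1, 4, 8, 8), torch.tensor([0.5]))  # continuous time label in (0, 1]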
352
+ class UniPC:
353
+ def __init__(
354
+ self,
355
+ model_fn,
356
+ noise_schedule,
357
+ predict_x0=True,
358
+ thresholding=False,
359
+ max_val=1.,
360
+ variant='bh1',
361
+ ):
362
+ """Construct a UniPC.
363
+
364
+ We support both data_prediction and noise_prediction.
365
+ """
366
+ self.model = model_fn
367
+ self.noise_schedule = noise_schedule
368
+ self.variant = variant
369
+ self.predict_x0 = predict_x0
370
+ self.thresholding = thresholding
371
+ self.max_val = max_val
372
+
373
+ def dynamic_thresholding_fn(self, x0, t=None):
374
+ """
375
+ The dynamic thresholding method.
376
+ """
377
+ dims = x0.dim()
378
+ p = self.dynamic_thresholding_ratio
379
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
380
+ s = expand_dims(torch.maximum(s, self.thresholding_max_val * torch.ones_like(s).to(s.device)), dims)
381
+ x0 = torch.clamp(x0, -s, s) / s
382
+ return x0
383
+
384
+ def noise_prediction_fn(self, x, t):
385
+ """
386
+ Return the noise prediction model.
387
+ """
388
+ return self.model(x, t)
389
+
390
+ def data_prediction_fn(self, x, t):
391
+ """
392
+ Return the data prediction model (with thresholding).
393
+ """
394
+ noise = self.noise_prediction_fn(x, t)
395
+ dims = x.dim()
396
+ alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
397
+ x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
398
+ if self.thresholding:
399
+ p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
400
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
401
+ s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
402
+ x0 = torch.clamp(x0, -s, s) / s
403
+ return x0
404
+
405
+ def model_fn(self, x, t):
406
+ """
407
+ Convert the model to the noise prediction model or the data prediction model.
408
+ """
409
+ if self.predict_x0:
410
+ return self.data_prediction_fn(x, t)
411
+ else:
412
+ return self.noise_prediction_fn(x, t)
413
+
414
+ def get_time_steps(self, skip_type, t_T, t_0, N, device):
415
+ """Compute the intermediate time steps for sampling.
416
+ """
417
+ if skip_type == 'logSNR':
418
+ lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
419
+ lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
420
+ logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
421
+ return self.noise_schedule.inverse_lambda(logSNR_steps)
422
+ elif skip_type == 'time_uniform':
423
+ return torch.linspace(t_T, t_0, N + 1).to(device)
424
+ elif skip_type == 'time_quadratic':
425
+ t_order = 2
426
+ t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)
427
+ return t
428
+ else:
429
+ raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
430
+
431
+ def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
432
+ """
433
+ Get the order of each step for sampling by the singlestep DPM-Solver.
434
+ """
435
+ if order == 3:
436
+ K = steps // 3 + 1
437
+ if steps % 3 == 0:
438
+ orders = [3,] * (K - 2) + [2, 1]
439
+ elif steps % 3 == 1:
440
+ orders = [3,] * (K - 1) + [1]
441
+ else:
442
+ orders = [3,] * (K - 1) + [2]
443
+ elif order == 2:
444
+ if steps % 2 == 0:
445
+ K = steps // 2
446
+ orders = [2,] * K
447
+ else:
448
+ K = steps // 2 + 1
449
+ orders = [2,] * (K - 1) + [1]
450
+ elif order == 1:
451
+ K = steps
452
+ orders = [1,] * steps
453
+ else:
454
+ raise ValueError("'order' must be '1' or '2' or '3'.")
455
+ if skip_type == 'logSNR':
456
+ # To reproduce the results in DPM-Solver paper
457
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
458
+ else:
459
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders), 0).to(device)]
460
+ return timesteps_outer, orders
461
+
462
+ def denoise_to_zero_fn(self, x, s):
463
+ """
464
+ Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infinity with a first-order discretization.
465
+ """
466
+ return self.data_prediction_fn(x, s)
467
+
468
+ def multistep_uni_pc_update(self, x, model_prev_list, t_prev_list, t, order, **kwargs):
469
+ if len(t.shape) == 0:
470
+ t = t.view(-1)
471
+ if 'bh' in self.variant:
472
+ return self.multistep_uni_pc_bh_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
473
+ else:
474
+ assert self.variant == 'vary_coeff'
475
+ return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
476
+
477
+ def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True):
478
+ logging.info(f'using unified predictor-corrector with order {order} (solver type: vary coeff)')
479
+ ns = self.noise_schedule
480
+ assert order <= len(model_prev_list)
481
+
482
+ # first compute rks
483
+ t_prev_0 = t_prev_list[-1]
484
+ lambda_prev_0 = ns.marginal_lambda(t_prev_0)
485
+ lambda_t = ns.marginal_lambda(t)
486
+ model_prev_0 = model_prev_list[-1]
487
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
488
+ log_alpha_t = ns.marginal_log_mean_coeff(t)
489
+ alpha_t = torch.exp(log_alpha_t)
490
+
491
+ h = lambda_t - lambda_prev_0
492
+
493
+ rks = []
494
+ D1s = []
495
+ for i in range(1, order):
496
+ t_prev_i = t_prev_list[-(i + 1)]
497
+ model_prev_i = model_prev_list[-(i + 1)]
498
+ lambda_prev_i = ns.marginal_lambda(t_prev_i)
499
+ rk = (lambda_prev_i - lambda_prev_0) / h
500
+ rks.append(rk)
501
+ D1s.append((model_prev_i - model_prev_0) / rk)
502
+
503
+ rks.append(1.)
504
+ rks = torch.tensor(rks, device=x.device)
505
+
506
+ K = len(rks)
507
+ # build C matrix
508
+ C = []
509
+
510
+ col = torch.ones_like(rks)
511
+ for k in range(1, K + 1):
512
+ C.append(col)
513
+ col = col * rks / (k + 1)
514
+ C = torch.stack(C, dim=1)
515
+
516
+ if len(D1s) > 0:
517
+ D1s = torch.stack(D1s, dim=1) # (B, K-1, C, H, W)
518
+ C_inv_p = torch.linalg.inv(C[:-1, :-1])
519
+ A_p = C_inv_p
520
+
521
+ if use_corrector:
522
+ C_inv = torch.linalg.inv(C)
523
+ A_c = C_inv
524
+
525
+ hh = -h if self.predict_x0 else h
526
+ h_phi_1 = torch.expm1(hh)
527
+ h_phi_ks = []
528
+ factorial_k = 1
529
+ h_phi_k = h_phi_1
530
+ for k in range(1, K + 2):
531
+ h_phi_ks.append(h_phi_k)
532
+ h_phi_k = h_phi_k / hh - 1 / factorial_k
533
+ factorial_k *= (k + 1)
534
+
535
+ model_t = None
536
+ if self.predict_x0:
537
+ x_t_ = (
538
+ sigma_t / sigma_prev_0 * x
539
+ - alpha_t * h_phi_1 * model_prev_0
540
+ )
541
+ # now predictor
542
+ x_t = x_t_
543
+ if len(D1s) > 0:
544
+ # compute the residuals for predictor
545
+ for k in range(K - 1):
546
+ x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k])
547
+ # now corrector
548
+ if use_corrector:
549
+ model_t = self.model_fn(x_t, t)
550
+ D1_t = (model_t - model_prev_0)
551
+ x_t = x_t_
552
+ k = 0
553
+ for k in range(K - 1):
554
+ x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1])
555
+ x_t = x_t - alpha_t * h_phi_ks[K] * (D1_t * A_c[k][-1])
556
+ else:
557
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
558
+ x_t_ = (
559
+ (torch.exp(log_alpha_t - log_alpha_prev_0)) * x
560
+ - (sigma_t * h_phi_1) * model_prev_0
561
+ )
562
+ # now predictor
563
+ x_t = x_t_
564
+ if len(D1s) > 0:
565
+ # compute the residuals for predictor
566
+ for k in range(K - 1):
567
+ x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k])
568
+ # now corrector
569
+ if use_corrector:
570
+ model_t = self.model_fn(x_t, t)
571
+ D1_t = (model_t - model_prev_0)
572
+ x_t = x_t_
573
+ k = 0
574
+ for k in range(K - 1):
575
+ x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1])
576
+ x_t = x_t - sigma_t * h_phi_ks[K] * (D1_t * A_c[k][-1])
577
+ return x_t, model_t
578
+
579
+ def multistep_uni_pc_bh_update(self, x, model_prev_list, t_prev_list, t, order, x_t=None, use_corrector=True):
580
+ # print(f'using unified predictor-corrector with order {order} (solver type: B(h))')
581
+ ns = self.noise_schedule
582
+ assert order <= len(model_prev_list)
583
+ dims = x.dim()
584
+
585
+ # first compute rks
586
+ t_prev_0 = t_prev_list[-1]
587
+ lambda_prev_0 = ns.marginal_lambda(t_prev_0)
588
+ lambda_t = ns.marginal_lambda(t)
589
+ model_prev_0 = model_prev_list[-1]
590
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
591
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
592
+ alpha_t = torch.exp(log_alpha_t)
593
+
594
+ h = lambda_t - lambda_prev_0
595
+
596
+ rks = []
597
+ D1s = []
598
+ for i in range(1, order):
599
+ t_prev_i = t_prev_list[-(i + 1)]
600
+ model_prev_i = model_prev_list[-(i + 1)]
601
+ lambda_prev_i = ns.marginal_lambda(t_prev_i)
602
+ rk = ((lambda_prev_i - lambda_prev_0) / h)[0]
603
+ rks.append(rk)
604
+ D1s.append((model_prev_i - model_prev_0) / rk)
605
+
606
+ rks.append(1.)
607
+ rks = torch.tensor(rks, device=x.device)
608
+
609
+ R = []
610
+ b = []
611
+
612
+ hh = -h[0] if self.predict_x0 else h[0]
613
+ h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1
614
+ h_phi_k = h_phi_1 / hh - 1
615
+
616
+ factorial_i = 1
617
+
618
+ if self.variant == 'bh1':
619
+ B_h = hh
620
+ elif self.variant == 'bh2':
621
+ B_h = torch.expm1(hh)
622
+ else:
623
+ raise NotImplementedError()
624
+
625
+ for i in range(1, order + 1):
626
+ R.append(torch.pow(rks, i - 1))
627
+ b.append(h_phi_k * factorial_i / B_h)
628
+ factorial_i *= (i + 1)
629
+ h_phi_k = h_phi_k / hh - 1 / factorial_i
630
+
631
+ R = torch.stack(R)
632
+ b = torch.tensor(b, device=x.device)
633
+
634
+ # now predictor
635
+ use_predictor = len(D1s) > 0 and x_t is None
636
+ if len(D1s) > 0:
637
+ D1s = torch.stack(D1s, dim=1) # (B, K-1, C, H, W)
638
+ if x_t is None:
639
+ # for order 2, we use a simplified version
640
+ if order == 2:
641
+ rhos_p = torch.tensor([0.5], device=b.device)
642
+ else:
643
+ rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1])
644
+ else:
645
+ D1s = None
646
+
647
+ if use_corrector:
648
+ # print('using corrector')
649
+ # for order 1, we use a simplified version
650
+ if order == 1:
651
+ rhos_c = torch.tensor([0.5], device=b.device)
652
+ else:
653
+ rhos_c = torch.linalg.solve(R, b)
654
+
655
+ model_t = None
656
+ if self.predict_x0:
657
+ x_t_ = (
658
+ expand_dims(sigma_t / sigma_prev_0, dims) * x
659
+ - expand_dims(alpha_t * h_phi_1, dims) * model_prev_0
660
+ )
661
+
662
+ if x_t is None:
663
+ if use_predictor:
664
+ pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
665
+ else:
666
+ pred_res = 0
667
+ x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * pred_res
668
+
669
+ if use_corrector:
670
+ model_t = self.model_fn(x_t, t)
671
+ if D1s is not None:
672
+ corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
673
+ else:
674
+ corr_res = 0
675
+ D1_t = (model_t - model_prev_0)
676
+ x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
677
+ else:
678
+ x_t_ = (
679
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
680
+ - expand_dims(sigma_t * h_phi_1, dims) * model_prev_0
681
+ )
682
+ if x_t is None:
683
+ if use_predictor:
684
+ pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
685
+ else:
686
+ pred_res = 0
687
+ x_t = x_t_ - expand_dims(sigma_t * B_h, dims) * pred_res
688
+
689
+ if use_corrector:
690
+ model_t = self.model_fn(x_t, t)
691
+ if D1s is not None:
692
+ corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
693
+ else:
694
+ corr_res = 0
695
+ D1_t = (model_t - model_prev_0)
696
+ x_t = x_t_ - expand_dims(sigma_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
697
+ return x_t, model_t
698
+
699
+
700
+ def sample(self, x, timesteps, t_start=None, t_end=None, order=3, skip_type='time_uniform',
701
+ method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
702
+ atol=0.0078, rtol=0.05, corrector=False, callback=None, disable_pbar=False
703
+ ):
704
+ # t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
705
+ # t_T = self.noise_schedule.T if t_start is None else t_start
706
+ steps = len(timesteps) - 1
707
+ if method == 'multistep':
708
+ assert steps >= order
709
+ # timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
710
+ assert timesteps.shape[0] - 1 == steps
711
+ # with torch.no_grad():
712
+ for step_index in trange(steps, disable=disable_pbar):
713
+ if step_index == 0:
714
+ vec_t = timesteps[0].expand((x.shape[0]))
715
+ model_prev_list = [self.model_fn(x, vec_t)]
716
+ t_prev_list = [vec_t]
717
+ elif step_index < order:
718
+ init_order = step_index
719
+ # Init the first `order` values by lower order multistep DPM-Solver.
720
+ # for init_order in range(1, order):
721
+ vec_t = timesteps[init_order].expand(x.shape[0])
722
+ x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, init_order, use_corrector=True)
723
+ if model_x is None:
724
+ model_x = self.model_fn(x, vec_t)
725
+ model_prev_list.append(model_x)
726
+ t_prev_list.append(vec_t)
727
+ else:
728
+ extra_final_step = 0
729
+ if step_index == (steps - 1):
730
+ extra_final_step = 1
731
+ for step in range(step_index, step_index + 1 + extra_final_step):
732
+ vec_t = timesteps[step].expand(x.shape[0])
733
+ if lower_order_final:
734
+ step_order = min(order, steps + 1 - step)
735
+ else:
736
+ step_order = order
737
+ # print('this step order:', step_order)
738
+ if step == steps:
739
+ # print('do not run corrector at the last step')
740
+ use_corrector = False
741
+ else:
742
+ use_corrector = True
743
+ x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, step_order, use_corrector=use_corrector)
744
+ for i in range(order - 1):
745
+ t_prev_list[i] = t_prev_list[i + 1]
746
+ model_prev_list[i] = model_prev_list[i + 1]
747
+ t_prev_list[-1] = vec_t
748
+ # We do not need to evaluate the final model value.
749
+ if step < steps:
750
+ if model_x is None:
751
+ model_x = self.model_fn(x, vec_t)
752
+ model_prev_list[-1] = model_x
753
+ if callback is not None:
754
+ callback({'x': x, 'i': step_index, 'denoised': model_prev_list[-1]})
755
+ else:
756
+ raise NotImplementedError()
757
+ # if denoise_to_zero:
758
+ # x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
759
+ return x
760
+
761
+
762
+ #############################################################
763
+ # other utility functions
764
+ #############################################################
765
+
766
+ def interpolate_fn(x, xp, yp):
767
+ """
768
+ A piecewise linear function y = f(x), using xp and yp as keypoints.
769
+ We implement f(x) in a differentiable way (i.e. applicable for autograd).
770
+ The function f(x) is well-defined for all x. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)
771
+
772
+ Args:
773
+ x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
774
+ xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
775
+ yp: PyTorch tensor with shape [C, K].
776
+ Returns:
777
+ The function values f(x), with shape [N, C].
778
+ """
779
+ N, K = x.shape[0], xp.shape[1]
780
+ all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
781
+ sorted_all_x, x_indices = torch.sort(all_x, dim=2)
782
+ x_idx = torch.argmin(x_indices, dim=2)
783
+ cand_start_idx = x_idx - 1
784
+ start_idx = torch.where(
785
+ torch.eq(x_idx, 0),
786
+ torch.tensor(1, device=x.device),
787
+ torch.where(
788
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
789
+ ),
790
+ )
791
+ end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
792
+ start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
793
+ end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
794
+ start_idx2 = torch.where(
795
+ torch.eq(x_idx, 0),
796
+ torch.tensor(0, device=x.device),
797
+ torch.where(
798
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
799
+ ),
800
+ )
801
+ y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
802
+ start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
803
+ end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
804
+ cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
805
+ return cand
806
+
807
+
808
+ def expand_dims(v, dims):
809
+ """
810
+ Expand the tensor `v` to the dim `dims`.
811
+
812
+ Args:
813
+ `v`: a PyTorch tensor with shape [N].
814
+ `dims`: an `int`.
815
+ Returns:
816
+ a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
817
+ """
818
+ return v[(...,) + (None,)*(dims - 1)]
819
+
820
+
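# Example: expand_dims turns a per-sample scalar of shape [N] into [N, 1, 1, 1]
# so it broadcasts against a [N, C, H, W] latent batch.
v = torch.tensor([1.0, 2.0])
assert expand_dims(v, 4).shape == (2, 1, 1, 1)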
821
+ class SigmaConvert:
822
+ schedule = ""
823
+ def marginal_log_mean_coeff(self, sigma):
824
+ return 0.5 * torch.log(1 / ((sigma * sigma) + 1))
825
+
826
+ def marginal_alpha(self, t):
827
+ return torch.exp(self.marginal_log_mean_coeff(t))
828
+
829
+ def marginal_std(self, t):
830
+ return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
831
+
832
+ def marginal_lambda(self, t):
833
+ """
834
+ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
835
+ """
836
+ log_mean_coeff = self.marginal_log_mean_coeff(t)
837
+ log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
838
+ return log_mean_coeff - log_std
839
+
840
+ def predict_eps_sigma(model, input, sigma_in, **kwargs):
841
+ sigma = sigma_in.view(sigma_in.shape[:1] + (1,) * (input.ndim - 1))
842
+ input = input * ((sigma ** 2 + 1.0) ** 0.5)
843
+ return (input - model(input, sigma_in, **kwargs)) / sigma
844
+
845
+
846
+ def sample_unipc(model, noise, sigmas, extra_args=None, callback=None, disable=False, variant='bh1'):
847
+ timesteps = sigmas.clone()
848
+ if sigmas[-1] == 0:
849
+ timesteps = sigmas.clone()  # clone (not a view) so the caller's sigmas is not mutated below
850
+ timesteps[-1] = 0.001
851
+ else:
852
+ timesteps = sigmas.clone()
853
+ ns = SigmaConvert()
854
+
855
+ noise = noise / torch.sqrt(1.0 + timesteps[0] ** 2.0)
856
+ model_type = "noise"
857
+
858
+ model_fn = model_wrapper(
859
+ lambda input, sigma, **kwargs: predict_eps_sigma(model, input, sigma, **kwargs),
860
+ ns,
861
+ model_type=model_type,
862
+ guidance_type="uncond",
863
+ model_kwargs=extra_args,
864
+ )
865
+
866
+ order = min(3, len(timesteps) - 2)
867
+ uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, variant=variant)
868
+ x = uni_pc.sample(noise, timesteps=timesteps, skip_type="time_uniform", method="multistep", order=order, lower_order_final=True, callback=callback, disable_pbar=disable)
869
+ x /= ns.marginal_alpha(timesteps[-1])
870
+ return x
871
+
872
+ def sample_unipc_bh2(model, noise, sigmas, extra_args=None, callback=None, disable=False):
873
+ return sample_unipc(model, noise, sigmas, extra_args, callback, disable, variant='bh2')
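A self-contained smoke test sketch, under the assumption (ComfyUI's sampler convention) that `model(x, sigma, **kwargs)` is a denoiser returning a prediction of the clean latent; the zero-predicting toy model is purely illustrative. Note that `extra_args` must be a dict (e.g. `{}`), since it is forwarded as `model_kwargs`:

def toy_denoiser(x, sigma, **kwargs):
    return torch.zeros_like(x)  # pretend the clean latent is all zeros

sigmas = torch.linspace(14.6, 0.0, 10)  # descending sigma schedule ending at 0
noise = torch.randn(1, 4, 8, 8)
out = sample_unipc(toy_denoiser, noise, sigmas, extra_args={}, disable=True)
assert out.shape == noise.shape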
comfy/float.py ADDED
@@ -0,0 +1,67 @@
1
+ import torch
2
+
3
+ def calc_mantissa(abs_x, exponent, normal_mask, MANTISSA_BITS, EXPONENT_BIAS, generator=None):
4
+ mantissa_scaled = torch.where(
5
+ normal_mask,
6
+ (abs_x / (2.0 ** (exponent - EXPONENT_BIAS)) - 1.0) * (2**MANTISSA_BITS),
7
+ (abs_x / (2.0 ** (-EXPONENT_BIAS + 1 - MANTISSA_BITS)))
8
+ )
9
+
10
+ mantissa_scaled += torch.rand(mantissa_scaled.size(), dtype=mantissa_scaled.dtype, layout=mantissa_scaled.layout, device=mantissa_scaled.device, generator=generator)
11
+ return mantissa_scaled.floor() / (2**MANTISSA_BITS)
12
+
13
+ #Not 100% sure about this
14
+ def manual_stochastic_round_to_float8(x, dtype, generator=None):
15
+ if dtype == torch.float8_e4m3fn:
16
+ EXPONENT_BITS, MANTISSA_BITS, EXPONENT_BIAS = 4, 3, 7
17
+ elif dtype == torch.float8_e5m2:
18
+ EXPONENT_BITS, MANTISSA_BITS, EXPONENT_BIAS = 5, 2, 15
19
+ else:
20
+ raise ValueError("Unsupported dtype")
21
+
22
+ x = x.half()
23
+ sign = torch.sign(x)
24
+ abs_x = x.abs()
25
+ sign = torch.where(abs_x == 0, 0, sign)
26
+
27
+ # Combine exponent calculation and clamping
28
+ exponent = torch.clamp(
29
+ torch.floor(torch.log2(abs_x)) + EXPONENT_BIAS,
30
+ 0, 2**EXPONENT_BITS - 1
31
+ )
32
+
33
+ # Combine mantissa calculation and rounding
34
+ normal_mask = ~(exponent == 0)
35
+
36
+ abs_x[:] = calc_mantissa(abs_x, exponent, normal_mask, MANTISSA_BITS, EXPONENT_BIAS, generator=generator)
37
+
38
+ sign *= torch.where(
39
+ normal_mask,
40
+ (2.0 ** (exponent - EXPONENT_BIAS)) * (1.0 + abs_x),
41
+ (2.0 ** (-EXPONENT_BIAS + 1)) * abs_x
42
+ )
43
+
44
+ inf = torch.finfo(dtype)
45
+ torch.clamp(sign, min=inf.min, max=inf.max, out=sign)
46
+ return sign
47
+
48
+
49
+
50
+ def stochastic_rounding(value, dtype, seed=0):
51
+ if dtype == torch.float32:
52
+ return value.to(dtype=torch.float32)
53
+ if dtype == torch.float16:
54
+ return value.to(dtype=torch.float16)
55
+ if dtype == torch.bfloat16:
56
+ return value.to(dtype=torch.bfloat16)
57
+ if dtype == torch.float8_e4m3fn or dtype == torch.float8_e5m2:
58
+ generator = torch.Generator(device=value.device)
59
+ generator.manual_seed(seed)
60
+ output = torch.empty_like(value, dtype=dtype)
61
+ num_slices = max(1, (value.numel() / (4096 * 4096)))
62
+ slice_size = max(1, round(value.shape[0] / num_slices))
63
+ for i in range(0, value.shape[0], slice_size):
64
+ output[i:i+slice_size].copy_(manual_stochastic_round_to_float8(value[i:i+slice_size], dtype, generator=generator))
65
+ return output
66
+
67
+ return value.to(dtype=dtype)
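To illustrate the stochastic behaviour (assuming a PyTorch build with float8 dtypes): rounding many copies of the same value lands on both neighbouring float8 grid points, so the mean stays near the input, whereas deterministic round-to-nearest would collapse onto a single point:

x = torch.full((10000,), 0.123)
r = stochastic_rounding(x, torch.float8_e4m3fn, seed=0)
print(r.to(torch.float32).unique())  # the two adjacent float8 grid points
print(r.to(torch.float32).mean())    # close to 0.123 on average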
comfy/gligen.py ADDED
@@ -0,0 +1,344 @@
1
+ import math
2
+ import torch
3
+ from torch import nn
4
+ from .ldm.modules.attention import CrossAttention
5
+ from inspect import isfunction
6
+ import comfy.ops
7
+ ops = comfy.ops.manual_cast
8
+
9
+ def exists(val):
10
+ return val is not None
11
+
12
+
13
+ def uniq(arr):
14
+ return{el: True for el in arr}.keys()
15
+
16
+
17
+ def default(val, d):
18
+ if exists(val):
19
+ return val
20
+ return d() if isfunction(d) else d
21
+
22
+
23
+ # feedforward
24
+ class GEGLU(nn.Module):
25
+ def __init__(self, dim_in, dim_out):
26
+ super().__init__()
27
+ self.proj = ops.Linear(dim_in, dim_out * 2)
28
+
29
+ def forward(self, x):
30
+ x, gate = self.proj(x).chunk(2, dim=-1)
31
+ return x * torch.nn.functional.gelu(gate)
32
+
33
+
34
+ class FeedForward(nn.Module):
35
+ def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
36
+ super().__init__()
37
+ inner_dim = int(dim * mult)
38
+ dim_out = default(dim_out, dim)
39
+ project_in = nn.Sequential(
40
+ ops.Linear(dim, inner_dim),
41
+ nn.GELU()
42
+ ) if not glu else GEGLU(dim, inner_dim)
43
+
44
+ self.net = nn.Sequential(
45
+ project_in,
46
+ nn.Dropout(dropout),
47
+ ops.Linear(inner_dim, dim_out)
48
+ )
49
+
50
+ def forward(self, x):
51
+ return self.net(x)
52
+
53
+
54
+ class GatedCrossAttentionDense(nn.Module):
55
+ def __init__(self, query_dim, context_dim, n_heads, d_head):
56
+ super().__init__()
57
+
58
+ self.attn = CrossAttention(
59
+ query_dim=query_dim,
60
+ context_dim=context_dim,
61
+ heads=n_heads,
62
+ dim_head=d_head,
63
+ operations=ops)
64
+ self.ff = FeedForward(query_dim, glu=True)
65
+
66
+ self.norm1 = ops.LayerNorm(query_dim)
67
+ self.norm2 = ops.LayerNorm(query_dim)
68
+
69
+ self.register_parameter('alpha_attn', nn.Parameter(torch.tensor(0.)))
70
+ self.register_parameter('alpha_dense', nn.Parameter(torch.tensor(0.)))
71
+
72
+ # this can be useful: we can externally change magnitude of tanh(alpha)
73
+ # for example, when it is set to 0, the entire model is the same as the
74
+ # original one
75
+ self.scale = 1
76
+
77
+ def forward(self, x, objs):
78
+
79
+ x = x + self.scale * \
80
+ torch.tanh(self.alpha_attn) * self.attn(self.norm1(x), objs, objs)
81
+ x = x + self.scale * \
82
+ torch.tanh(self.alpha_dense) * self.ff(self.norm2(x))
83
+
84
+ return x
85
+
86
+
87
+ class GatedSelfAttentionDense(nn.Module):
88
+ def __init__(self, query_dim, context_dim, n_heads, d_head):
89
+ super().__init__()
90
+
91
+ # we need a linear projection since we need to concatenate the visual and
92
+ # object features
93
+ self.linear = ops.Linear(context_dim, query_dim)
94
+
95
+ self.attn = CrossAttention(
96
+ query_dim=query_dim,
97
+ context_dim=query_dim,
98
+ heads=n_heads,
99
+ dim_head=d_head,
100
+ operations=ops)
101
+ self.ff = FeedForward(query_dim, glu=True)
102
+
103
+ self.norm1 = ops.LayerNorm(query_dim)
104
+ self.norm2 = ops.LayerNorm(query_dim)
105
+
106
+ self.register_parameter('alpha_attn', nn.Parameter(torch.tensor(0.)))
107
+ self.register_parameter('alpha_dense', nn.Parameter(torch.tensor(0.)))
108
+
109
+ # this can be useful: we can externally change magnitude of tanh(alpha)
110
+ # for example, when it is set to 0, the entire model is the same as the
111
+ # original one
112
+ self.scale = 1
113
+
114
+ def forward(self, x, objs):
115
+
116
+ N_visual = x.shape[1]
117
+ objs = self.linear(objs)
118
+
119
+ x = x + self.scale * torch.tanh(self.alpha_attn) * self.attn(
120
+ self.norm1(torch.cat([x, objs], dim=1)))[:, 0:N_visual, :]
121
+ x = x + self.scale * \
122
+ torch.tanh(self.alpha_dense) * self.ff(self.norm2(x))
123
+
124
+ return x
125
+
126
+
127
+ class GatedSelfAttentionDense2(nn.Module):
128
+ def __init__(self, query_dim, context_dim, n_heads, d_head):
129
+ super().__init__()
130
+
131
+ # we need a linear projection since we need to concatenate the visual and
132
+ # object features
133
+ self.linear = ops.Linear(context_dim, query_dim)
134
+
135
+ self.attn = CrossAttention(
136
+ query_dim=query_dim, context_dim=query_dim, dim_head=d_head, operations=ops)
137
+ self.ff = FeedForward(query_dim, glu=True)
138
+
139
+ self.norm1 = ops.LayerNorm(query_dim)
140
+ self.norm2 = ops.LayerNorm(query_dim)
141
+
142
+ self.register_parameter('alpha_attn', nn.Parameter(torch.tensor(0.)))
143
+ self.register_parameter('alpha_dense', nn.Parameter(torch.tensor(0.)))
144
+
145
+ # this can be useful: we can externally change magnitude of tanh(alpha)
146
+ # for example, when it is set to 0, the entire model is the same as the
147
+ # original one
148
+ self.scale = 1
149
+
150
+ def forward(self, x, objs):
151
+
152
+ B, N_visual, _ = x.shape
153
+ B, N_ground, _ = objs.shape
154
+
155
+ objs = self.linear(objs)
156
+
157
+ # sanity check
158
+ size_v = math.sqrt(N_visual)
159
+ size_g = math.sqrt(N_ground)
160
+ assert int(size_v) == size_v, "Number of visual tokens must be a perfect square"
161
+ assert int(size_g) == size_g, "Number of grounding tokens must be a perfect square"
162
+ size_v = int(size_v)
163
+ size_g = int(size_g)
164
+
165
+ # select grounding token and resize it to visual token size as residual
166
+ out = self.attn(self.norm1(torch.cat([x, objs], dim=1)))[
167
+ :, N_visual:, :]
168
+ out = out.permute(0, 2, 1).reshape(B, -1, size_g, size_g)
169
+ out = torch.nn.functional.interpolate(
170
+ out, (size_v, size_v), mode='bicubic')
171
+ residual = out.reshape(B, -1, N_visual).permute(0, 2, 1)
172
+
173
+ # add residual to visual feature
174
+ x = x + self.scale * torch.tanh(self.alpha_attn) * residual
175
+ x = x + self.scale * \
176
+ torch.tanh(self.alpha_dense) * self.ff(self.norm2(x))
177
+
178
+ return x
179
+
180
+
181
+ class FourierEmbedder():
182
+ def __init__(self, num_freqs=64, temperature=100):
183
+
184
+ self.num_freqs = num_freqs
185
+ self.temperature = temperature
186
+ self.freq_bands = temperature ** (torch.arange(num_freqs) / num_freqs)
187
+
188
+ @torch.no_grad()
189
+ def __call__(self, x, cat_dim=-1):
190
+ "x: arbitrary shape of tensor. dim: cat dim"
191
+ out = []
192
+ for freq in self.freq_bands:
193
+ out.append(torch.sin(freq * x))
194
+ out.append(torch.cos(freq * x))
195
+ return torch.cat(out, cat_dim)
196
+
197
+
198
+ class PositionNet(nn.Module):
199
+ def __init__(self, in_dim, out_dim, fourier_freqs=8):
200
+ super().__init__()
201
+ self.in_dim = in_dim
202
+ self.out_dim = out_dim
203
+
204
+ self.fourier_embedder = FourierEmbedder(num_freqs=fourier_freqs)
205
+ self.position_dim = fourier_freqs * 2 * 4 # 2 is sin&cos, 4 is xyxy
206
+
207
+ self.linears = nn.Sequential(
208
+ ops.Linear(self.in_dim + self.position_dim, 512),
209
+ nn.SiLU(),
210
+ ops.Linear(512, 512),
211
+ nn.SiLU(),
212
+ ops.Linear(512, out_dim),
213
+ )
214
+
215
+ self.null_positive_feature = torch.nn.Parameter(
216
+ torch.zeros([self.in_dim]))
217
+ self.null_position_feature = torch.nn.Parameter(
218
+ torch.zeros([self.position_dim]))
219
+
220
+ def forward(self, boxes, masks, positive_embeddings):
221
+ B, N, _ = boxes.shape
222
+ masks = masks.unsqueeze(-1)
223
+ positive_embeddings = positive_embeddings
224
+
225
+ # embedding position (it may includes padding as placeholder)
226
+ xyxy_embedding = self.fourier_embedder(boxes) # B*N*4 --> B*N*C
227
+
228
+ # learnable null embedding
229
+ positive_null = self.null_positive_feature.to(device=boxes.device, dtype=boxes.dtype).view(1, 1, -1)
230
+ xyxy_null = self.null_position_feature.to(device=boxes.device, dtype=boxes.dtype).view(1, 1, -1)
231
+
232
+ # replace padding with learnable null embedding
233
+ positive_embeddings = positive_embeddings * \
234
+ masks + (1 - masks) * positive_null
235
+ xyxy_embedding = xyxy_embedding * masks + (1 - masks) * xyxy_null
236
+
237
+ objs = self.linears(
238
+ torch.cat([positive_embeddings, xyxy_embedding], dim=-1))
239
+ assert objs.shape == torch.Size([B, N, self.out_dim])
240
+ return objs
241
+
242
+
243
+ class Gligen(nn.Module):
244
+ def __init__(self, modules, position_net, key_dim):
245
+ super().__init__()
246
+ self.module_list = nn.ModuleList(modules)
247
+ self.position_net = position_net
248
+ self.key_dim = key_dim
249
+ self.max_objs = 30
250
+ self.current_device = torch.device("cpu")
251
+
252
+ def _set_position(self, boxes, masks, positive_embeddings):
253
+ objs = self.position_net(boxes, masks, positive_embeddings)
254
+ def func(x, extra_options):
255
+ key = extra_options["transformer_index"]
256
+ module = self.module_list[key]
257
+ return module(x, objs.to(device=x.device, dtype=x.dtype))
258
+ return func
259
+
260
+ def set_position(self, latent_image_shape, position_params, device):
261
+ batch, c, h, w = latent_image_shape
262
+ masks = torch.zeros([self.max_objs], device="cpu")
263
+ boxes = []
264
+ positive_embeddings = []
265
+ for p in position_params:
266
+ x1 = (p[4]) / w
267
+ y1 = (p[3]) / h
268
+ x2 = (p[4] + p[2]) / w
269
+ y2 = (p[3] + p[1]) / h
270
+ masks[len(boxes)] = 1.0
271
+ boxes += [torch.tensor((x1, y1, x2, y2)).unsqueeze(0)]
272
+ positive_embeddings += [p[0]]
273
+ append_boxes = []
274
+ append_conds = []
275
+ if len(boxes) < self.max_objs:
276
+ append_boxes = [torch.zeros(
277
+ [self.max_objs - len(boxes), 4], device="cpu")]
278
+ append_conds = [torch.zeros(
279
+ [self.max_objs - len(boxes), self.key_dim], device="cpu")]
280
+
281
+ box_out = torch.cat(
282
+ boxes + append_boxes).unsqueeze(0).repeat(batch, 1, 1)
283
+ masks = masks.unsqueeze(0).repeat(batch, 1)
284
+ conds = torch.cat(positive_embeddings +
285
+ append_conds).unsqueeze(0).repeat(batch, 1, 1)
286
+ return self._set_position(
287
+ box_out.to(device),
288
+ masks.to(device),
289
+ conds.to(device))
290
+
291
+ def set_empty(self, latent_image_shape, device):
292
+ batch, c, h, w = latent_image_shape
293
+ masks = torch.zeros([self.max_objs], device="cpu").repeat(batch, 1)
294
+ box_out = torch.zeros([self.max_objs, 4],
295
+ device="cpu").repeat(batch, 1, 1)
296
+ conds = torch.zeros([self.max_objs, self.key_dim],
297
+ device="cpu").repeat(batch, 1, 1)
298
+ return self._set_position(
299
+ box_out.to(device),
300
+ masks.to(device),
301
+ conds.to(device))
302
+
303
+
304
+ def load_gligen(sd):
305
+ sd_k = sd.keys()
306
+ output_list = []
307
+ key_dim = 768
308
+ for a in ["input_blocks", "middle_block", "output_blocks"]:
309
+ for b in range(20):
310
+ k_temp = filter(lambda k: "{}.{}.".format(a, b)
311
+ in k and ".fuser." in k, sd_k)
312
+ k_temp = map(lambda k: (k, k.split(".fuser.")[-1]), k_temp)
313
+
314
+ n_sd = {}
315
+ for k in k_temp:
316
+ n_sd[k[1]] = sd[k[0]]
317
+ if len(n_sd) > 0:
318
+ query_dim = n_sd["linear.weight"].shape[0]
319
+ key_dim = n_sd["linear.weight"].shape[1]
320
+
321
+ if key_dim == 768: # SD1.x
322
+ n_heads = 8
323
+ d_head = query_dim // n_heads
324
+ else:
325
+ d_head = 64
326
+ n_heads = query_dim // d_head
327
+
328
+ gated = GatedSelfAttentionDense(
329
+ query_dim, key_dim, n_heads, d_head)
330
+ gated.load_state_dict(n_sd, strict=False)
331
+ output_list.append(gated)
332
+
333
+ if "position_net.null_positive_feature" in sd_k:
334
+ in_dim = sd["position_net.null_positive_feature"].shape[0]
335
+ out_dim = sd["position_net.linears.4.weight"].shape[0]
336
+
337
+ class WeightsLoader(torch.nn.Module):
338
+ pass
339
+ w = WeightsLoader()
340
+ w.position_net = PositionNet(in_dim, out_dim)
341
+ w.load_state_dict(sd, strict=False)
342
+
343
+ gligen = Gligen(output_list, w.position_net, key_dim)
344
+ return gligen
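The position path above hinges on the Fourier box embedding: PositionNet expects fourier_embedder to turn B*N*4 xyxy boxes into B*N*(num_freqs * 2 * 4) features before the null-embedding masking. A minimal sketch of that shape contract, assuming comfy.gligen is importable from a ComfyUI checkout:

import torch
from comfy.gligen import FourierEmbedder

# 8 frequencies, as PositionNet uses by default (fourier_freqs=8)
emb = FourierEmbedder(num_freqs=8)
boxes = torch.rand(2, 30, 4)   # batch of 30 normalized xyxy boxes per sample
xyxy_embedding = emb(boxes)    # sin and cos per frequency, concatenated on the last dim
assert xyxy_embedding.shape == (2, 30, 8 * 2 * 4)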
comfy/hooks.py ADDED
@@ -0,0 +1,785 @@
+ from __future__ import annotations
+ from typing import TYPE_CHECKING, Callable
+ import enum
+ import math
+ import torch
+ import numpy as np
+ import itertools
+ import logging
+
+ if TYPE_CHECKING:
+    from comfy.model_patcher import ModelPatcher, PatcherInjection
+    from comfy.model_base import BaseModel
+    from comfy.sd import CLIP
+ import comfy.lora
+ import comfy.model_management
+ import comfy.patcher_extension
+ from node_helpers import conditioning_set_values
+
+ # #######################################################################################################
+ # Hooks explanation
+ # -------------------
+ # The purpose of hooks is to allow conds to influence sampling without the need for ComfyUI core code to
+ # make explicit special cases like it does for ControlNet and GLIGEN.
+ #
+ # This is necessary for nodes/features that are intended for use with masked or scheduled conds, or those
+ # that should run special code when a 'marked' cond is used in sampling.
+ # #######################################################################################################
+
+ class EnumHookMode(enum.Enum):
+    '''
+    Priority of hook memory optimization vs. speed, mostly related to WeightHooks.
+
+    MinVram: No caching will occur for any operations related to hooks.
+    MaxSpeed: Excess VRAM (and RAM, once VRAM is sufficiently depleted) will be used to cache hook weights when switching hook groups.
+    '''
+    MinVram = "minvram"
+    MaxSpeed = "maxspeed"
+
+ class EnumHookType(enum.Enum):
+    '''
+    Hook types, each of which has different expected behavior.
+    '''
+    Weight = "weight"
+    ObjectPatch = "object_patch"
+    AdditionalModels = "add_models"
+    TransformerOptions = "transformer_options"
+    Injections = "add_injections"
+
+ class EnumWeightTarget(enum.Enum):
+    Model = "model"
+    Clip = "clip"
+
+ class EnumHookScope(enum.Enum):
+    '''
+    Determines if a hook should be limited in its influence over sampling.
+
+    AllConditioning: hook will affect all conds used in sampling.
+    HookedOnly: hook will only affect the conds it was attached to.
+    '''
+    AllConditioning = "all_conditioning"
+    HookedOnly = "hooked_only"
+
+
+ class _HookRef:
+    pass
+
+
+ def default_should_register(hook: Hook, model: ModelPatcher, model_options: dict, target_dict: dict[str], registered: HookGroup):
+    '''Example of what a custom_should_register function can look like.'''
+    return True
+
+
+ def create_target_dict(target: EnumWeightTarget=None, **kwargs) -> dict[str]:
+    '''Creates a base dictionary for use with a Hook's target param.'''
+    d = {}
+    if target is not None:
+        d['target'] = target
+    d.update(kwargs)
+    return d
+
+
+ class Hook:
+    def __init__(self, hook_type: EnumHookType=None, hook_ref: _HookRef=None, hook_id: str=None,
+                 hook_keyframe: HookKeyframeGroup=None, hook_scope=EnumHookScope.AllConditioning):
+        self.hook_type = hook_type
+        '''Enum identifying the general class of this hook.'''
+        self.hook_ref = hook_ref if hook_ref else _HookRef()
+        '''Reference shared between hook clones that have the same value. Should NOT be modified.'''
+        self.hook_id = hook_id
+        '''Optional string ID to identify hook; useful if duplicates need to be consolidated at registration time.'''
+        self.hook_keyframe = hook_keyframe if hook_keyframe else HookKeyframeGroup()
+        '''Keyframe storage that can be referenced to get strength for the current sampling step.'''
+        self.hook_scope = hook_scope
+        '''Scope of where this hook should apply in terms of the conds used in the sampling run.'''
+        self.custom_should_register = default_should_register
+        '''Can be overridden with a compatible function to decide if this hook should be registered without the need to override .should_register'''
+
+    @property
+    def strength(self):
+        return self.hook_keyframe.strength
+
+    def initialize_timesteps(self, model: BaseModel):
+        self.reset()
+        self.hook_keyframe.initialize_timesteps(model)
+
+    def reset(self):
+        self.hook_keyframe.reset()
+
+    def clone(self):
+        c: Hook = self.__class__()
+        c.hook_type = self.hook_type
+        c.hook_ref = self.hook_ref
+        c.hook_id = self.hook_id
+        c.hook_keyframe = self.hook_keyframe
+        c.hook_scope = self.hook_scope
+        c.custom_should_register = self.custom_should_register
+        return c
+
+    def should_register(self, model: ModelPatcher, model_options: dict, target_dict: dict[str], registered: HookGroup):
+        return self.custom_should_register(self, model, model_options, target_dict, registered)
+
+    def add_hook_patches(self, model: ModelPatcher, model_options: dict, target_dict: dict[str], registered: HookGroup):
+        raise NotImplementedError("add_hook_patches should be defined for Hook subclasses")
+
+    def __eq__(self, other: Hook):
+        return self.__class__ == other.__class__ and self.hook_ref == other.hook_ref
+
+    def __hash__(self):
+        return hash(self.hook_ref)
+
+ class WeightHook(Hook):
+    '''
+    Hook responsible for tracking weights to be applied to some model/clip.
+
+    Note: the value of hook_scope is ignored and is treated as HookedOnly.
+    '''
+    def __init__(self, strength_model=1.0, strength_clip=1.0):
+        super().__init__(hook_type=EnumHookType.Weight, hook_scope=EnumHookScope.HookedOnly)
+        self.weights: dict = None
+        self.weights_clip: dict = None
+        self.need_weight_init = True
+        self._strength_model = strength_model
+        self._strength_clip = strength_clip
+        self.hook_scope = EnumHookScope.HookedOnly  # this value does not matter for WeightHooks, just for docs
+
+    @property
+    def strength_model(self):
+        return self._strength_model * self.strength
+
+    @property
+    def strength_clip(self):
+        return self._strength_clip * self.strength
+
+    def add_hook_patches(self, model: ModelPatcher, model_options: dict, target_dict: dict[str], registered: HookGroup):
+        if not self.should_register(model, model_options, target_dict, registered):
+            return False
+        weights = None
+
+        target = target_dict.get('target', None)
+        if target == EnumWeightTarget.Clip:
+            strength = self._strength_clip
+        else:
+            strength = self._strength_model
+
+        if self.need_weight_init:
+            key_map = {}
+            if target == EnumWeightTarget.Clip:
+                key_map = comfy.lora.model_lora_keys_clip(model.model, key_map)
+            else:
+                key_map = comfy.lora.model_lora_keys_unet(model.model, key_map)
+            weights = comfy.lora.load_lora(self.weights, key_map, log_missing=False)
+        else:
+            if target == EnumWeightTarget.Clip:
+                weights = self.weights_clip
+            else:
+                weights = self.weights
+        model.add_hook_patches(hook=self, patches=weights, strength_patch=strength)
+        registered.add(self)
+        return True
+        # TODO: add logs about any keys that were not applied
+
+    def clone(self):
+        c: WeightHook = super().clone()
+        c.weights = self.weights
+        c.weights_clip = self.weights_clip
+        c.need_weight_init = self.need_weight_init
+        c._strength_model = self._strength_model
+        c._strength_clip = self._strength_clip
+        return c
+
+ class ObjectPatchHook(Hook):
+    def __init__(self, object_patches: dict[str]=None,
+                 hook_scope=EnumHookScope.AllConditioning):
+        super().__init__(hook_type=EnumHookType.ObjectPatch)
+        self.object_patches = object_patches
+        self.hook_scope = hook_scope
+
+    def clone(self):
+        c: ObjectPatchHook = super().clone()
+        c.object_patches = self.object_patches
+        return c
+
+    def add_hook_patches(self, model: ModelPatcher, model_options: dict, target_dict: dict[str], registered: HookGroup):
+        raise NotImplementedError("ObjectPatchHook is not supported yet in ComfyUI.")
+
+ class AdditionalModelsHook(Hook):
+    '''
+    Hook responsible for telling model management about any additional models that should be loaded.
+
+    Note: the value of hook_scope is ignored and is treated as AllConditioning.
+    '''
+    def __init__(self, models: list[ModelPatcher]=None, key: str=None):
+        super().__init__(hook_type=EnumHookType.AdditionalModels)
+        self.models = models
+        self.key = key
+
+    def clone(self):
+        c: AdditionalModelsHook = super().clone()
+        c.models = self.models.copy() if self.models else self.models
+        c.key = self.key
+        return c
+
+    def add_hook_patches(self, model: ModelPatcher, model_options: dict, target_dict: dict[str], registered: HookGroup):
+        if not self.should_register(model, model_options, target_dict, registered):
+            return False
+        registered.add(self)
+        return True
+
+ class TransformerOptionsHook(Hook):
+    '''
+    Hook responsible for adding wrappers, callbacks, patches, or anything else related to transformer_options.
+    '''
+    def __init__(self, transformers_dict: dict[str, dict[str, dict[str, list[Callable]]]]=None,
+                 hook_scope=EnumHookScope.AllConditioning):
+        super().__init__(hook_type=EnumHookType.TransformerOptions)
+        self.transformers_dict = transformers_dict
+        self.hook_scope = hook_scope
+        self._skip_adding = False
+        '''Internal value used to avoid double loading of transformer_options when hook_scope is AllConditioning.'''
+
+    def clone(self):
+        c: TransformerOptionsHook = super().clone()
+        c.transformers_dict = self.transformers_dict
+        c._skip_adding = self._skip_adding
+        return c
+
+    def add_hook_patches(self, model: ModelPatcher, model_options: dict, target_dict: dict[str], registered: HookGroup):
+        if not self.should_register(model, model_options, target_dict, registered):
+            return False
+        # NOTE: to_load_options will be used to manually load patches/wrappers/callbacks from hooks
+        self._skip_adding = False
+        if self.hook_scope == EnumHookScope.AllConditioning:
+            add_model_options = {"transformer_options": self.transformers_dict,
+                                 "to_load_options": self.transformers_dict}
+            # skip_adding if included in AllConditioning to avoid double loading
+            self._skip_adding = True
+        else:
+            add_model_options = {"to_load_options": self.transformers_dict}
+        registered.add(self)
+        comfy.patcher_extension.merge_nested_dicts(model_options, add_model_options, copy_dict1=False)
+        return True
+
+    def on_apply_hooks(self, model: ModelPatcher, transformer_options: dict[str]):
+        if not self._skip_adding:
+            comfy.patcher_extension.merge_nested_dicts(transformer_options, self.transformers_dict, copy_dict1=False)
+
+ WrapperHook = TransformerOptionsHook
+ '''Only here for backwards compatibility; WrapperHook is identical to TransformerOptionsHook.'''
+
+ class InjectionsHook(Hook):
+    def __init__(self, key: str=None, injections: list[PatcherInjection]=None,
+                 hook_scope=EnumHookScope.AllConditioning):
+        super().__init__(hook_type=EnumHookType.Injections)
+        self.key = key
+        self.injections = injections
+        self.hook_scope = hook_scope
+
+    def clone(self):
+        c: InjectionsHook = super().clone()
+        c.key = self.key
+        c.injections = self.injections.copy() if self.injections else self.injections
+        return c
+
+    def add_hook_patches(self, model: ModelPatcher, model_options: dict, target_dict: dict[str], registered: HookGroup):
+        raise NotImplementedError("InjectionsHook is not supported yet in ComfyUI.")
+
+ class HookGroup:
+    '''
+    Stores groups of hooks, and allows them to be queried by type.
+
+    To prevent breaking their functionality, never modify the underlying self.hooks or self._hook_dict vars directly;
+    always use the provided functions on HookGroup.
+    '''
+    def __init__(self):
+        self.hooks: list[Hook] = []
+        self._hook_dict: dict[EnumHookType, list[Hook]] = {}
+
+    def __len__(self):
+        return len(self.hooks)
+
+    def add(self, hook: Hook):
+        if hook not in self.hooks:
+            self.hooks.append(hook)
+            self._hook_dict.setdefault(hook.hook_type, []).append(hook)
+
+    def remove(self, hook: Hook):
+        if hook in self.hooks:
+            self.hooks.remove(hook)
+            self._hook_dict[hook.hook_type].remove(hook)
+
+    def get_type(self, hook_type: EnumHookType):
+        return self._hook_dict.get(hook_type, [])
+
+    def contains(self, hook: Hook):
+        return hook in self.hooks
+
+    def is_subset_of(self, other: HookGroup):
+        self_hooks = set(self.hooks)
+        other_hooks = set(other.hooks)
+        return self_hooks.issubset(other_hooks)
+
+    def new_with_common_hooks(self, other: HookGroup):
+        c = HookGroup()
+        for hook in self.hooks:
+            if other.contains(hook):
+                c.add(hook.clone())
+        return c
+
+    def clone(self):
+        c = HookGroup()
+        for hook in self.hooks:
+            c.add(hook.clone())
+        return c
+
+    def clone_and_combine(self, other: HookGroup):
+        c = self.clone()
+        if other is not None:
+            for hook in other.hooks:
+                c.add(hook.clone())
+        return c
+
+    def set_keyframes_on_hooks(self, hook_kf: HookKeyframeGroup):
+        if hook_kf is None:
+            hook_kf = HookKeyframeGroup()
+        else:
+            hook_kf = hook_kf.clone()
+        for hook in self.hooks:
+            hook.hook_keyframe = hook_kf
+
+    def get_hooks_for_clip_schedule(self):
+        scheduled_hooks: dict[WeightHook, list[tuple[tuple[float,float], HookKeyframe]]] = {}
+        # only care about WeightHooks, for now
+        for hook in self.get_type(EnumHookType.Weight):
+            hook: WeightHook
+            hook_schedule = []
+            # if no hook keyframes, assign default value
+            if len(hook.hook_keyframe.keyframes) == 0:
+                hook_schedule.append(((0.0, 1.0), None))
+                scheduled_hooks[hook] = hook_schedule
+                continue
+            # find ranges of values
+            prev_keyframe = hook.hook_keyframe.keyframes[0]
+            for keyframe in hook.hook_keyframe.keyframes:
+                if keyframe.start_percent > prev_keyframe.start_percent and not math.isclose(keyframe.strength, prev_keyframe.strength):
+                    hook_schedule.append(((prev_keyframe.start_percent, keyframe.start_percent), prev_keyframe))
+                    prev_keyframe = keyframe
+                elif keyframe.start_percent == prev_keyframe.start_percent:
+                    prev_keyframe = keyframe
+            # create final range, assuming last start_percent was not 1.0
+            if not math.isclose(prev_keyframe.start_percent, 1.0):
+                hook_schedule.append(((prev_keyframe.start_percent, 1.0), prev_keyframe))
+            scheduled_hooks[hook] = hook_schedule
+        # each hook now has its schedule as a list of (range, keyframe) tuples
+        all_ranges: list[tuple[float, float]] = []
+        for range_kfs in scheduled_hooks.values():
+            for t_range, keyframe in range_kfs:
+                all_ranges.append(t_range)
+        # turn list of ranges into boundaries
+        boundaries_set = set(itertools.chain.from_iterable(all_ranges))
+        boundaries_set.add(0.0)
+        boundaries = sorted(boundaries_set)
+        real_ranges = [(boundaries[i], boundaries[i + 1]) for i in range(len(boundaries) - 1)]
+        # with real ranges defined, give appropriate hooks w/ keyframes for each range
+        scheduled_keyframes: list[tuple[tuple[float,float], list[tuple[WeightHook, HookKeyframe]]]] = []
+        for t_range in real_ranges:
+            hooks_schedule = []
+            for hook, val in scheduled_hooks.items():
+                keyframe = None
+                # check if there is a keyframe that works for the current t_range
+                for stored_range, stored_kf in val:
+                    # if stored range overlaps the current t_range, then it fits - give it the assigned keyframe
+                    if stored_range[0] < t_range[1] and stored_range[1] > t_range[0]:
+                        keyframe = stored_kf
+                        break
+                hooks_schedule.append((hook, keyframe))
+            scheduled_keyframes.append((t_range, hooks_schedule))
+        return scheduled_keyframes
+
+    def reset(self):
+        for hook in self.hooks:
+            hook.reset()
+
+    @staticmethod
+    def combine_all_hooks(hooks_list: list[HookGroup], require_count=0) -> HookGroup:
+        actual: list[HookGroup] = []
+        for group in hooks_list:
+            if group is not None:
+                actual.append(group)
+        if len(actual) < require_count:
+            raise Exception(f"Need at least {require_count} hooks to combine, but only had {len(actual)}.")
+        # if no hooks, then return None
+        if len(actual) == 0:
+            return None
+        # if only 1 hook, just return itself without cloning
+        elif len(actual) == 1:
+            return actual[0]
+        final_hook: HookGroup = None
+        for hook in actual:
+            if final_hook is None:
+                final_hook = hook.clone()
+            else:
+                final_hook = final_hook.clone_and_combine(hook)
+        return final_hook
+
+
+ class HookKeyframe:
+    def __init__(self, strength: float, start_percent=0.0, guarantee_steps=1):
+        self.strength = strength
+        # scheduling
+        self.start_percent = float(start_percent)
+        self.start_t = 999999999.9
+        self.guarantee_steps = guarantee_steps
+
+    def get_effective_guarantee_steps(self, max_sigma: torch.Tensor):
+        '''If the keyframe starts before the current sampling range (max_sigma), treat guarantee_steps as 0.'''
+        if self.start_t > max_sigma:
+            return 0
+        return self.guarantee_steps
+
+    def clone(self):
+        c = HookKeyframe(strength=self.strength,
+                         start_percent=self.start_percent, guarantee_steps=self.guarantee_steps)
+        c.start_t = self.start_t
+        return c
+
+ class HookKeyframeGroup:
+    def __init__(self):
+        self.keyframes: list[HookKeyframe] = []
+        self._current_keyframe: HookKeyframe = None
+        self._current_used_steps = 0
+        self._current_index = 0
+        self._current_strength = None
+        self._curr_t = -1.
+
+    # properties shadow those of HookWeightsKeyframe
+    @property
+    def strength(self):
+        if self._current_keyframe is not None:
+            return self._current_keyframe.strength
+        return 1.0
+
+    def reset(self):
+        self._current_keyframe = None
+        self._current_used_steps = 0
+        self._current_index = 0
+        self._current_strength = None
+        self._curr_t = -1.
+        self._set_first_as_current()
+
+    def add(self, keyframe: HookKeyframe):
+        # add to end of list, then sort
+        self.keyframes.append(keyframe)
+        self.keyframes = get_sorted_list_via_attr(self.keyframes, "start_percent")
+        self._set_first_as_current()
+
+    def _set_first_as_current(self):
+        if len(self.keyframes) > 0:
+            self._current_keyframe = self.keyframes[0]
+        else:
+            self._current_keyframe = None
+
+    def has_guarantee_steps(self):
+        for kf in self.keyframes:
+            if kf.guarantee_steps > 0:
+                return True
+        return False
+
+    def has_index(self, index: int):
+        return index >= 0 and index < len(self.keyframes)
+
+    def is_empty(self):
+        return len(self.keyframes) == 0
+
+    def clone(self):
+        c = HookKeyframeGroup()
+        for keyframe in self.keyframes:
+            c.keyframes.append(keyframe.clone())
+        c._set_first_as_current()
+        return c
+
+    def initialize_timesteps(self, model: BaseModel):
+        for keyframe in self.keyframes:
+            keyframe.start_t = model.model_sampling.percent_to_sigma(keyframe.start_percent)
+
+    def prepare_current_keyframe(self, curr_t: float, transformer_options: dict[str, torch.Tensor]) -> bool:
+        if self.is_empty():
+            return False
+        if curr_t == self._curr_t:
+            return False
+        max_sigma = torch.max(transformer_options["sample_sigmas"])
+        prev_index = self._current_index
+        prev_strength = self._current_strength
+        # if met guaranteed steps, look for next keyframe in case need to switch
+        if self._current_used_steps >= self._current_keyframe.get_effective_guarantee_steps(max_sigma):
+            # if has next index, loop through and see if need to switch
+            if self.has_index(self._current_index+1):
+                for i in range(self._current_index+1, len(self.keyframes)):
+                    eval_c = self.keyframes[i]
+                    # check if start_t is greater or equal to curr_t
+                    # NOTE: t is in terms of sigmas, not percent, so bigger number = earlier step in sampling
+                    if eval_c.start_t >= curr_t:
+                        self._current_index = i
+                        self._current_strength = eval_c.strength
+                        self._current_keyframe = eval_c
+                        self._current_used_steps = 0
+                        # if guarantee_steps greater than zero, stop searching for other keyframes
+                        if self._current_keyframe.get_effective_guarantee_steps(max_sigma) > 0:
+                            break
+                    # if eval_c is outside the percent range, stop looking further
+                    else: break
+        # update steps current context is used
+        self._current_used_steps += 1
+        # update current timestep this was performed on
+        self._curr_t = curr_t
+        # return True if keyframe changed, False if no change
+        return prev_index != self._current_index and prev_strength != self._current_strength
+
+
+ class InterpolationMethod:
+    LINEAR = "linear"
+    EASE_IN = "ease_in"
+    EASE_OUT = "ease_out"
+    EASE_IN_OUT = "ease_in_out"
+
+    _LIST = [LINEAR, EASE_IN, EASE_OUT, EASE_IN_OUT]
+
+    @classmethod
+    def get_weights(cls, num_from: float, num_to: float, length: int, method: str, reverse=False):
+        diff = num_to - num_from
+        if method == cls.LINEAR:
+            weights = torch.linspace(num_from, num_to, length)
+        elif method == cls.EASE_IN:
+            index = torch.linspace(0, 1, length)
+            weights = diff * np.power(index, 2) + num_from
+        elif method == cls.EASE_OUT:
+            index = torch.linspace(0, 1, length)
+            weights = diff * (1 - np.power(1 - index, 2)) + num_from
+        elif method == cls.EASE_IN_OUT:
+            index = torch.linspace(0, 1, length)
+            weights = diff * ((1 - np.cos(index * np.pi)) / 2) + num_from
+        else:
+            raise ValueError(f"Unrecognized interpolation method '{method}'.")
+        if reverse:
+            weights = weights.flip(dims=(0,))
+        return weights
+
+ def get_sorted_list_via_attr(objects: list, attr: str) -> list:
+    if not objects:
+        return objects
+    elif len(objects) <= 1:
+        return [x for x in objects]
+    # now that we know we have to sort, do it following these rules:
+    # a) if objects have the same value of the attribute, maintain their relative order
+    # b) perform sorting of the groups of objects with the same attribute value
+    unique_attrs = {}
+    for o in objects:
+        val_attr = getattr(o, attr)
+        attr_list: list = unique_attrs.get(val_attr, list())
+        attr_list.append(o)
+        if val_attr not in unique_attrs:
+            unique_attrs[val_attr] = attr_list
+    # now that we have the unique attr values grouped together in relative order, sort them by key
+    sorted_attrs = dict(sorted(unique_attrs.items()))
+    # now flatten out the dict into a list to return
+    sorted_list = []
+    for object_list in sorted_attrs.values():
+        sorted_list.extend(object_list)
+    return sorted_list
+
+ def create_transformer_options_from_hooks(model: ModelPatcher, hooks: HookGroup, transformer_options: dict[str]=None):
+    # if no hooks or is not a ModelPatcher for sampling, return empty dict
+    if hooks is None or model.is_clip:
+        return {}
+    if transformer_options is None:
+        transformer_options = {}
+    for hook in hooks.get_type(EnumHookType.TransformerOptions):
+        hook: TransformerOptionsHook
+        hook.on_apply_hooks(model, transformer_options)
+    return transformer_options
+
+ def create_hook_lora(lora: dict[str, torch.Tensor], strength_model: float, strength_clip: float):
+    hook_group = HookGroup()
+    hook = WeightHook(strength_model=strength_model, strength_clip=strength_clip)
+    hook_group.add(hook)
+    hook.weights = lora
+    return hook_group
+
+ def create_hook_model_as_lora(weights_model, weights_clip, strength_model: float, strength_clip: float):
+    hook_group = HookGroup()
+    hook = WeightHook(strength_model=strength_model, strength_clip=strength_clip)
+    hook_group.add(hook)
+    patches_model = None
+    patches_clip = None
+    if weights_model is not None:
+        patches_model = {}
+        for key in weights_model:
+            patches_model[key] = ("model_as_lora", (weights_model[key],))
+    if weights_clip is not None:
+        patches_clip = {}
+        for key in weights_clip:
+            patches_clip[key] = ("model_as_lora", (weights_clip[key],))
+    hook.weights = patches_model
+    hook.weights_clip = patches_clip
+    hook.need_weight_init = False
+    return hook_group
+
+ def get_patch_weights_from_model(model: ModelPatcher, discard_model_sampling=True):
+    if model is None:
+        return None
+    patches_model: dict[str, torch.Tensor] = model.model.state_dict()
+    if discard_model_sampling:
+        # do not include ANY model_sampling components of the model that should act as a patch
+        for key in list(patches_model.keys()):
+            if key.startswith("model_sampling"):
+                patches_model.pop(key, None)
+    return patches_model
+
+ # NOTE: this function shows how to register weight hooks directly on the ModelPatchers
+ def load_hook_lora_for_models(model: ModelPatcher, clip: CLIP, lora: dict[str, torch.Tensor],
+                               strength_model: float, strength_clip: float):
+    key_map = {}
+    if model is not None:
+        key_map = comfy.lora.model_lora_keys_unet(model.model, key_map)
+    if clip is not None:
+        key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map)
+
+    hook_group = HookGroup()
+    hook = WeightHook()
+    hook_group.add(hook)
+    loaded: dict[str] = comfy.lora.load_lora(lora, key_map)
+    if model is not None:
+        new_modelpatcher = model.clone()
+        k = new_modelpatcher.add_hook_patches(hook=hook, patches=loaded, strength_patch=strength_model)
+    else:
+        k = ()
+        new_modelpatcher = None
+
+    if clip is not None:
+        new_clip = clip.clone()
+        k1 = new_clip.patcher.add_hook_patches(hook=hook, patches=loaded, strength_patch=strength_clip)
+    else:
+        k1 = ()
+        new_clip = None
+    k = set(k)
+    k1 = set(k1)
+    for x in loaded:
+        if (x not in k) and (x not in k1):
+            logging.warning(f"NOT LOADED {x}")
+    return (new_modelpatcher, new_clip, hook_group)
+
+ def _combine_hooks_from_values(c_dict: dict[str, HookGroup], values: dict[str, HookGroup], cache: dict[tuple[HookGroup, HookGroup], HookGroup]):
+    hooks_key = 'hooks'
+    # if hooks only exist in one dict, do what's needed so that it ends up in c_dict
+    if hooks_key not in values:
+        return
+    if hooks_key not in c_dict:
+        hooks_value = values.get(hooks_key, None)
+        if hooks_value is not None:
+            c_dict[hooks_key] = hooks_value
+        return
+    # otherwise, need to combine with minimum duplication via cache
+    hooks_tuple = (c_dict[hooks_key], values[hooks_key])
+    cached_hooks = cache.get(hooks_tuple, None)
+    if cached_hooks is None:
+        new_hooks = hooks_tuple[0].clone_and_combine(hooks_tuple[1])
+        cache[hooks_tuple] = new_hooks
+        c_dict[hooks_key] = new_hooks
+    else:
+        c_dict[hooks_key] = cache[hooks_tuple]
+
+ def conditioning_set_values_with_hooks(conditioning, values={}, append_hooks=True,
+                                        cache: dict[tuple[HookGroup, HookGroup], HookGroup]=None):
+    c = []
+    if cache is None:
+        cache = {}
+    for t in conditioning:
+        n = [t[0], t[1].copy()]
+        for k in values:
+            if append_hooks and k == 'hooks':
+                _combine_hooks_from_values(n[1], values, cache)
+            else:
+                n[1][k] = values[k]
+        c.append(n)
+
+    return c
+
+ def set_hooks_for_conditioning(cond, hooks: HookGroup, append_hooks=True, cache: dict[tuple[HookGroup, HookGroup], HookGroup]=None):
+    if hooks is None:
+        return cond
+    return conditioning_set_values_with_hooks(cond, {'hooks': hooks}, append_hooks=append_hooks, cache=cache)
+
+ def set_timesteps_for_conditioning(cond, timestep_range: tuple[float,float]):
+    if timestep_range is None:
+        return cond
+    return conditioning_set_values(cond, {"start_percent": timestep_range[0],
+                                          "end_percent": timestep_range[1]})
+
+ def set_mask_for_conditioning(cond, mask: torch.Tensor, set_cond_area: str, strength: float):
+    if mask is None:
+        return cond
+    set_area_to_bounds = False
+    if set_cond_area != 'default':
+        set_area_to_bounds = True
+    if len(mask.shape) < 3:
+        mask = mask.unsqueeze(0)
+    return conditioning_set_values(cond, {'mask': mask,
+                                          'set_area_to_bounds': set_area_to_bounds,
+                                          'mask_strength': strength})
+
+ def combine_conditioning(conds: list):
+    combined_conds = []
+    for cond in conds:
+        combined_conds.extend(cond)
+    return combined_conds
+
+ def combine_with_new_conds(conds: list, new_conds: list):
+    combined_conds = []
+    for c, new_c in zip(conds, new_conds):
+        combined_conds.append(combine_conditioning([c, new_c]))
+    return combined_conds
+
+ def set_conds_props(conds: list, strength: float, set_cond_area: str,
+                     mask: torch.Tensor=None, hooks: HookGroup=None, timesteps_range: tuple[float,float]=None, append_hooks=True):
+    final_conds = []
+    cache = {}
+    for c in conds:
+        # first, apply lora_hook to conditioning, if provided
+        c = set_hooks_for_conditioning(c, hooks, append_hooks=append_hooks, cache=cache)
+        # next, apply mask to conditioning
+        c = set_mask_for_conditioning(cond=c, mask=mask, strength=strength, set_cond_area=set_cond_area)
+        # apply timesteps, if present
+        c = set_timesteps_for_conditioning(cond=c, timestep_range=timesteps_range)
+        # finally, store the modified conditioning
+        final_conds.append(c)
+    return final_conds
+
+ def set_conds_props_and_combine(conds: list, new_conds: list, strength: float=1.0, set_cond_area: str="default",
+                                 mask: torch.Tensor=None, hooks: HookGroup=None, timesteps_range: tuple[float,float]=None, append_hooks=True):
+    combined_conds = []
+    cache = {}
+    for c, masked_c in zip(conds, new_conds):
+        # first, apply lora_hook to new conditioning, if provided
+        masked_c = set_hooks_for_conditioning(masked_c, hooks, append_hooks=append_hooks, cache=cache)
+        # next, apply mask to new conditioning, if provided
+        masked_c = set_mask_for_conditioning(cond=masked_c, mask=mask, set_cond_area=set_cond_area, strength=strength)
+        # apply timesteps, if present
+        masked_c = set_timesteps_for_conditioning(cond=masked_c, timestep_range=timesteps_range)
+        # finally, combine with existing conditioning and store
+        combined_conds.append(combine_conditioning([c, masked_c]))
+    return combined_conds
+
+ def set_default_conds_and_combine(conds: list, new_conds: list,
+                                   hooks: HookGroup=None, timesteps_range: tuple[float,float]=None, append_hooks=True):
+    combined_conds = []
+    cache = {}
+    for c, new_c in zip(conds, new_conds):
+        # first, apply lora_hook to new conditioning, if provided
+        new_c = set_hooks_for_conditioning(new_c, hooks, append_hooks=append_hooks, cache=cache)
+        # next, add default_cond key to cond so that during sampling, it can be identified
+        new_c = conditioning_set_values(new_c, {'default': True})
+        # apply timesteps, if present
+        new_c = set_timesteps_for_conditioning(cond=new_c, timestep_range=timesteps_range)
+        # finally, combine with existing conditioning and store
+        combined_conds.append(combine_conditioning([c, new_c]))
+    return combined_conds
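Putting the pieces above together, a scheduled weight hook is just a HookGroup holding a WeightHook plus a shared HookKeyframeGroup. A minimal sketch, assuming a ComfyUI checkout on the path; lora_sd is a hypothetical stand-in for a real LoRA state dict loaded from disk:

import torch
import comfy.hooks as hooks

# hypothetical placeholder; a real workflow would load an actual LoRA state dict
lora_sd = {"dummy.lora_up.weight": torch.zeros(4, 4)}
group = hooks.create_hook_lora(lora_sd, strength_model=0.8, strength_clip=1.0)

# full strength from 0% of sampling, reduced strength from 50% onward
kf_group = hooks.HookKeyframeGroup()
kf_group.add(hooks.HookKeyframe(strength=1.0, start_percent=0.0))
kf_group.add(hooks.HookKeyframe(strength=0.25, start_percent=0.5))
group.set_keyframes_on_hooks(kf_group)

# attaching the group to conditioning limits its influence to those conds,
# since WeightHook is always EnumHookScope.HookedOnly:
# cond = hooks.set_hooks_for_conditioning(cond, group)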
comfy/k_diffusion/deis.py ADDED
@@ -0,0 +1,120 @@
+ #Taken from: https://github.com/zju-pi/diff-sampler/blob/main/gits-main/solver_utils.py
+ #under Apache 2 license
+ import torch
+ import numpy as np
+
+ # A pytorch reimplementation of DEIS (https://github.com/qsh-zh/deis).
+ #############################
+ ### Utils for DEIS solver ###
+ #############################
+ #----------------------------------------------------------------------------
+ # Transfer from the input time (sigma) used in EDM to that (t) used in DEIS.
+
+ def edm2t(edm_steps, epsilon_s=1e-3, sigma_min=0.002, sigma_max=80):
+    vp_sigma_inv = lambda beta_d, beta_min: lambda sigma: ((beta_min ** 2 + 2 * beta_d * (sigma ** 2 + 1).log()).sqrt() - beta_min) / beta_d
+    vp_beta_d = 2 * (np.log(torch.tensor(sigma_min).cpu() ** 2 + 1) / epsilon_s - np.log(torch.tensor(sigma_max).cpu() ** 2 + 1)) / (epsilon_s - 1)
+    vp_beta_min = np.log(torch.tensor(sigma_max).cpu() ** 2 + 1) - 0.5 * vp_beta_d
+    t_steps = vp_sigma_inv(vp_beta_d.clone().detach().cpu(), vp_beta_min.clone().detach().cpu())(edm_steps.clone().detach().cpu())
+    return t_steps, vp_beta_min, vp_beta_d + vp_beta_min
+
+ #----------------------------------------------------------------------------
+
+ def cal_poly(prev_t, j, taus):
+    poly = 1
+    for k in range(prev_t.shape[0]):
+        if k == j:
+            continue
+        poly *= (taus - prev_t[k]) / (prev_t[j] - prev_t[k])
+    return poly
+
+ #----------------------------------------------------------------------------
+ # Transfer from t to alpha_t.
+
+ def t2alpha_fn(beta_0, beta_1, t):
+    return torch.exp(-0.5 * t ** 2 * (beta_1 - beta_0) - t * beta_0)
+
+ #----------------------------------------------------------------------------
+
+ def cal_intergrand(beta_0, beta_1, taus):
+    with torch.inference_mode(mode=False):
+        taus = taus.clone()
+        beta_0 = beta_0.clone()
+        beta_1 = beta_1.clone()
+        with torch.enable_grad():
+            taus.requires_grad_(True)
+            alpha = t2alpha_fn(beta_0, beta_1, taus)
+            log_alpha = alpha.log()
+            log_alpha.sum().backward()
+            d_log_alpha_dtau = taus.grad
+    integrand = -0.5 * d_log_alpha_dtau / torch.sqrt(alpha * (1 - alpha))
+    return integrand
+
+ #----------------------------------------------------------------------------
+
+ def get_deis_coeff_list(t_steps, max_order, N=10000, deis_mode='tab'):
+    """
+    Get the coefficient list for DEIS sampling.
+
+    Args:
+        t_steps: A pytorch tensor. The time steps for sampling.
+        max_order: An `int`. Maximum order of the solver. 1 <= max_order <= 4
+        N: An `int`. How many points to use for the numerical integration when deis_mode=='tab'.
+        deis_mode: A `str`. Select between 'tab' and 'rhoab'. Type of DEIS.
+    Returns:
+        A list of per-step coefficient lists for the DEIS solver.
+    """
+    if deis_mode == 'tab':
+        t_steps, beta_0, beta_1 = edm2t(t_steps)
+        C = []
+        for i, (t_cur, t_next) in enumerate(zip(t_steps[:-1], t_steps[1:])):
+            order = min(i+1, max_order)
+            if order == 1:
+                C.append([])
+            else:
+                taus = torch.linspace(t_cur, t_next, N)   # split the interval for integral approximation
+                dtau = (t_next - t_cur) / N
+                prev_t = t_steps[[i - k for k in range(order)]]
+                coeff_temp = []
+                integrand = cal_intergrand(beta_0, beta_1, taus)
+                for j in range(order):
+                    poly = cal_poly(prev_t, j, taus)
+                    coeff_temp.append(torch.sum(integrand * poly) * dtau)
+                C.append(coeff_temp)
+
+    elif deis_mode == 'rhoab':
+        # Analytical solution, second order
+        def get_def_intergral_2(a, b, start, end, c):
+            coeff = (end**3 - start**3) / 3 - (end**2 - start**2) * (a + b) / 2 + (end - start) * a * b
+            return coeff / ((c - a) * (c - b))
+
+        # Analytical solution, third order
+        def get_def_intergral_3(a, b, c, start, end, d):
+            coeff = (end**4 - start**4) / 4 - (end**3 - start**3) * (a + b + c) / 3 \
+                    + (end**2 - start**2) * (a*b + a*c + b*c) / 2 - (end - start) * a * b * c
+            return coeff / ((d - a) * (d - b) * (d - c))
+
+        C = []
+        for i, (t_cur, t_next) in enumerate(zip(t_steps[:-1], t_steps[1:])):
+            order = min(i, max_order)
+            if order == 0:
+                C.append([])
+            else:
+                prev_t = t_steps[[i - k for k in range(order+1)]]
+                if order == 1:
+                    coeff_cur = ((t_next - prev_t[1])**2 - (t_cur - prev_t[1])**2) / (2 * (t_cur - prev_t[1]))
+                    coeff_prev1 = (t_next - t_cur)**2 / (2 * (prev_t[1] - t_cur))
+                    coeff_temp = [coeff_cur, coeff_prev1]
+                elif order == 2:
+                    coeff_cur = get_def_intergral_2(prev_t[1], prev_t[2], t_cur, t_next, t_cur)
+                    coeff_prev1 = get_def_intergral_2(t_cur, prev_t[2], t_cur, t_next, prev_t[1])
+                    coeff_prev2 = get_def_intergral_2(t_cur, prev_t[1], t_cur, t_next, prev_t[2])
+                    coeff_temp = [coeff_cur, coeff_prev1, coeff_prev2]
+                elif order == 3:
+                    coeff_cur = get_def_intergral_3(prev_t[1], prev_t[2], prev_t[3], t_cur, t_next, t_cur)
+                    coeff_prev1 = get_def_intergral_3(t_cur, prev_t[2], prev_t[3], t_cur, t_next, prev_t[1])
+                    coeff_prev2 = get_def_intergral_3(t_cur, prev_t[1], prev_t[3], t_cur, t_next, prev_t[2])
+                    coeff_prev3 = get_def_intergral_3(t_cur, prev_t[1], prev_t[2], t_cur, t_next, prev_t[3])
+                    coeff_temp = [coeff_cur, coeff_prev1, coeff_prev2, coeff_prev3]
+                C.append(coeff_temp)
+    return C
+
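A short sketch of how these coefficients are typically requested, assuming a ComfyUI checkout; get_sigmas_karras comes from comfy/k_diffusion/sampling.py below, and dropping the schedule's trailing zero sigma before the EDM-to-t conversion is an assumption of this sketch rather than a documented requirement:

from comfy.k_diffusion.deis import get_deis_coeff_list
from comfy.k_diffusion.sampling import get_sigmas_karras  # defined in the next file

# 10-step Karras schedule (get_sigmas_karras appends a final zero sigma)
sigmas = get_sigmas_karras(10, sigma_min=0.03, sigma_max=14.6)
coeffs = get_deis_coeff_list(sigmas[:-1], max_order=3, deis_mode='tab')
assert len(coeffs) == len(sigmas[:-1]) - 1  # one coefficient list per solver step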
comfy/k_diffusion/sampling.py ADDED
@@ -0,0 +1,1338 @@
+ import math
+
+ from scipy import integrate
+ import torch
+ from torch import nn
+ import torchsde
+ from tqdm.auto import trange, tqdm
+
+ from . import utils
+ from . import deis
+ import comfy.model_patcher
+ import comfy.model_sampling
+
+ def append_zero(x):
+    return torch.cat([x, x.new_zeros([1])])
+
+
+ def get_sigmas_karras(n, sigma_min, sigma_max, rho=7., device='cpu'):
+    """Constructs the noise schedule of Karras et al. (2022)."""
+    ramp = torch.linspace(0, 1, n, device=device)
+    min_inv_rho = sigma_min ** (1 / rho)
+    max_inv_rho = sigma_max ** (1 / rho)
+    sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
+    return append_zero(sigmas).to(device)
+
+
+ def get_sigmas_exponential(n, sigma_min, sigma_max, device='cpu'):
+    """Constructs an exponential noise schedule."""
+    sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), n, device=device).exp()
+    return append_zero(sigmas)
+
+
+ def get_sigmas_polyexponential(n, sigma_min, sigma_max, rho=1., device='cpu'):
+    """Constructs a polynomial-in-log-sigma noise schedule."""
+    ramp = torch.linspace(1, 0, n, device=device) ** rho
+    sigmas = torch.exp(ramp * (math.log(sigma_max) - math.log(sigma_min)) + math.log(sigma_min))
+    return append_zero(sigmas)
+
+
+ def get_sigmas_vp(n, beta_d=19.9, beta_min=0.1, eps_s=1e-3, device='cpu'):
+    """Constructs a continuous VP noise schedule."""
+    t = torch.linspace(1, eps_s, n, device=device)
+    sigmas = torch.sqrt(torch.exp(beta_d * t ** 2 / 2 + beta_min * t) - 1)
+    return append_zero(sigmas)
+
+
+ def get_sigmas_laplace(n, sigma_min, sigma_max, mu=0., beta=0.5, device='cpu'):
+    """Constructs the noise schedule proposed by Tiankai et al. (2024)."""
+    epsilon = 1e-5  # avoid log(0)
+    x = torch.linspace(0, 1, n, device=device)
+    clamp = lambda x: torch.clamp(x, min=sigma_min, max=sigma_max)
+    lmb = mu - beta * torch.sign(0.5-x) * torch.log(1 - 2 * torch.abs(0.5-x) + epsilon)
+    sigmas = clamp(torch.exp(lmb))
+    return sigmas
+
+
+ def to_d(x, sigma, denoised):
+    """Converts a denoiser output to a Karras ODE derivative."""
+    return (x - denoised) / utils.append_dims(sigma, x.ndim)
+
+
+ def get_ancestral_step(sigma_from, sigma_to, eta=1.):
+    """Calculates the noise level (sigma_down) to step down to and the amount
+    of noise to add (sigma_up) when doing an ancestral sampling step."""
+    if not eta:
+        return sigma_to, 0.
+    sigma_up = min(sigma_to, eta * (sigma_to ** 2 * (sigma_from ** 2 - sigma_to ** 2) / sigma_from ** 2) ** 0.5)
+    sigma_down = (sigma_to ** 2 - sigma_up ** 2) ** 0.5
+    return sigma_down, sigma_up
+
+
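The split computed by get_ancestral_step is variance-preserving: when sigma_up is not clamped, sigma_down**2 + sigma_up**2 == sigma_to**2, so stepping down and re-noising lands back at the target noise level. A quick numeric check of that identity:

sigma_from, sigma_to = 10.0, 5.0
sigma_down, sigma_up = get_ancestral_step(sigma_from, sigma_to, eta=1.)
# sigma_up = sqrt(18.75), sigma_down = sqrt(6.25); their squares sum to 25 = sigma_to**2
assert abs(sigma_down**2 + sigma_up**2 - sigma_to**2) < 1e-9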
+ def default_noise_sampler(x, seed=None):
+    if seed is not None:
+        generator = torch.Generator(device=x.device)
+        generator.manual_seed(seed)
+    else:
+        generator = None
+
+    return lambda sigma, sigma_next: torch.randn(x.size(), dtype=x.dtype, layout=x.layout, device=x.device, generator=generator)
+
+
+ class BatchedBrownianTree:
+    """A wrapper around torchsde.BrownianTree that enables batches of entropy."""
+
+    def __init__(self, x, t0, t1, seed=None, **kwargs):
+        self.cpu_tree = True
+        if "cpu" in kwargs:
+            self.cpu_tree = kwargs.pop("cpu")
+        t0, t1, self.sign = self.sort(t0, t1)
+        w0 = kwargs.get('w0', torch.zeros_like(x))
+        if seed is None:
+            seed = torch.randint(0, 2 ** 63 - 1, []).item()
+        self.batched = True
+        try:
+            assert len(seed) == x.shape[0]
+            w0 = w0[0]
+        except TypeError:
+            seed = [seed]
+            self.batched = False
+        if self.cpu_tree:
+            self.trees = [torchsde.BrownianTree(t0.cpu(), w0.cpu(), t1.cpu(), entropy=s, **kwargs) for s in seed]
+        else:
+            self.trees = [torchsde.BrownianTree(t0, w0, t1, entropy=s, **kwargs) for s in seed]
+
+    @staticmethod
+    def sort(a, b):
+        return (a, b, 1) if a < b else (b, a, -1)
+
+    def __call__(self, t0, t1):
+        t0, t1, sign = self.sort(t0, t1)
+        if self.cpu_tree:
+            w = torch.stack([tree(t0.cpu().float(), t1.cpu().float()).to(t0.dtype).to(t0.device) for tree in self.trees]) * (self.sign * sign)
+        else:
+            w = torch.stack([tree(t0, t1) for tree in self.trees]) * (self.sign * sign)
+
+        return w if self.batched else w[0]
+
+
+ class BrownianTreeNoiseSampler:
+    """A noise sampler backed by a torchsde.BrownianTree.
+
+    Args:
+        x (Tensor): The tensor whose shape, device and dtype to use to generate
+            random samples.
+        sigma_min (float): The low end of the valid interval.
+        sigma_max (float): The high end of the valid interval.
+        seed (int or List[int]): The random seed. If a list of seeds is
+            supplied instead of a single integer, then the noise sampler will
+            use one BrownianTree per batch item, each with its own seed.
+        transform (callable): A function that maps sigma to the sampler's
+            internal timestep.
+    """
+
+    def __init__(self, x, sigma_min, sigma_max, seed=None, transform=lambda x: x, cpu=False):
+        self.transform = transform
+        t0, t1 = self.transform(torch.as_tensor(sigma_min)), self.transform(torch.as_tensor(sigma_max))
+        self.tree = BatchedBrownianTree(x, t0, t1, seed, cpu=cpu)
+
+    def __call__(self, sigma, sigma_next):
+        t0, t1 = self.transform(torch.as_tensor(sigma)), self.transform(torch.as_tensor(sigma_next))
+        return self.tree(t0, t1) / (t1 - t0).abs().sqrt()
+
+
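What the Brownian-tree machinery above buys is seed-stable SDE noise: two samplers built with the same seed answer the same (sigma, sigma_next) query identically. A minimal sketch, assuming torchsde is installed (it is a ComfyUI dependency):

x = torch.zeros(1, 4, 8, 8)
ns_a = BrownianTreeNoiseSampler(x, sigma_min=0.03, sigma_max=14.6, seed=42, cpu=True)
ns_b = BrownianTreeNoiseSampler(x, sigma_min=0.03, sigma_max=14.6, seed=42, cpu=True)
# identical entropy -> identical Brownian path -> identical noise draw
noise_a = ns_a(torch.tensor(10.0), torch.tensor(5.0))
noise_b = ns_b(torch.tensor(10.0), torch.tensor(5.0))
assert torch.equal(noise_a, noise_b)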
+ @torch.no_grad()
+ def sample_euler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
+    """Implements Algorithm 2 (Euler steps) from Karras et al. (2022)."""
+    extra_args = {} if extra_args is None else extra_args
+    s_in = x.new_ones([x.shape[0]])
+    for i in trange(len(sigmas) - 1, disable=disable):
+        if s_churn > 0:
+            gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
+            sigma_hat = sigmas[i] * (gamma + 1)
+        else:
+            gamma = 0
+            sigma_hat = sigmas[i]
+
+        if gamma > 0:
+            eps = torch.randn_like(x) * s_noise
+            x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
+        denoised = model(x, sigma_hat * s_in, **extra_args)
+        d = to_d(x, sigma_hat, denoised)
+        if callback is not None:
+            callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
+        dt = sigmas[i + 1] - sigma_hat
+        # Euler method
+        x = x + d * dt
+    return x
+
+
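sample_euler only assumes model is a callable model(x, sigma * s_in, **extra_args) returning the denoised prediction. A toy denoiser that always predicts zeros makes the update easy to trace, since each step then scales x by sigmas[i + 1] / sigmas[i]; a real run would pass a wrapped diffusion model instead:

def toy_denoiser(x, sigma, **extra_args):
    # hypothetical stand-in for a real denoiser: always predicts fully-denoised zeros
    return torch.zeros_like(x)

x = torch.randn(1, 4, 8, 8)
sigmas = get_sigmas_karras(10, sigma_min=0.03, sigma_max=14.6)
out = sample_euler(toy_denoiser, x * sigmas[0], sigmas, disable=True)
assert out.abs().max() == 0  # the final step lands exactly on the zero prediction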
171
+ @torch.no_grad()
172
+ def sample_euler_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
173
+ if isinstance(model.inner_model.inner_model.model_sampling, comfy.model_sampling.CONST):
174
+ return sample_euler_ancestral_RF(model, x, sigmas, extra_args, callback, disable, eta, s_noise, noise_sampler)
175
+ """Ancestral sampling with Euler method steps."""
176
+ extra_args = {} if extra_args is None else extra_args
177
+ seed = extra_args.get("seed", None)
178
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
179
+ s_in = x.new_ones([x.shape[0]])
180
+ for i in trange(len(sigmas) - 1, disable=disable):
181
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
182
+ sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
183
+ if callback is not None:
184
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
185
+
186
+ if sigma_down == 0:
187
+ x = denoised
188
+ else:
189
+ d = to_d(x, sigmas[i], denoised)
190
+ # Euler method
191
+ dt = sigma_down - sigmas[i]
192
+ x = x + d * dt + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
193
+ return x
194
+
195
+ @torch.no_grad()
196
+ def sample_euler_ancestral_RF(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1., noise_sampler=None):
197
+ """Ancestral sampling with Euler method steps."""
198
+ extra_args = {} if extra_args is None else extra_args
199
+ seed = extra_args.get("seed", None)
200
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
201
+ s_in = x.new_ones([x.shape[0]])
202
+ for i in trange(len(sigmas) - 1, disable=disable):
203
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
204
+ # sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
205
+ if callback is not None:
206
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
207
+
208
+ if sigmas[i + 1] == 0:
209
+ x = denoised
210
+ else:
211
+ downstep_ratio = 1 + (sigmas[i + 1] / sigmas[i] - 1) * eta
212
+ sigma_down = sigmas[i + 1] * downstep_ratio
213
+ alpha_ip1 = 1 - sigmas[i + 1]
214
+ alpha_down = 1 - sigma_down
215
+ renoise_coeff = (sigmas[i + 1]**2 - sigma_down**2 * alpha_ip1**2 / alpha_down**2)**0.5
216
+ # Euler method
217
+ sigma_down_i_ratio = sigma_down / sigmas[i]
218
+ x = sigma_down_i_ratio * x + (1 - sigma_down_i_ratio) * denoised
219
+ if eta > 0:
220
+ x = (alpha_ip1 / alpha_down) * x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * renoise_coeff
221
+ return x
222
+
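+ # Derivation note for the rectified-flow step above: with alpha = 1 - sigma the forward
+ # marginal is x = alpha * x0 + sigma * noise. The deterministic lerp to sigma_down followed
+ # by the alpha_ip1 / alpha_down rescale leaves residual noise of scale
+ # sigma_down * alpha_ip1 / alpha_down, so hitting the target marginal at sigma_{i+1} requires
+ # adding independent noise with standard deviation
+ #     renoise_coeff = sqrt(sigmas[i+1]**2 - sigma_down**2 * alpha_ip1**2 / alpha_down**2)
+ # which is exactly the coefficient computed in the loop.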
223
+ @torch.no_grad()
224
+ def sample_heun(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
225
+ """Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
226
+ extra_args = {} if extra_args is None else extra_args
227
+ s_in = x.new_ones([x.shape[0]])
228
+ for i in trange(len(sigmas) - 1, disable=disable):
229
+ if s_churn > 0:
230
+ gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
231
+ sigma_hat = sigmas[i] * (gamma + 1)
232
+ else:
233
+ gamma = 0
234
+ sigma_hat = sigmas[i]
235
+
237
+ if gamma > 0:
238
+ eps = torch.randn_like(x) * s_noise
239
+ x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
240
+ denoised = model(x, sigma_hat * s_in, **extra_args)
241
+ d = to_d(x, sigma_hat, denoised)
242
+ if callback is not None:
243
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
244
+ dt = sigmas[i + 1] - sigma_hat
245
+ if sigmas[i + 1] == 0:
246
+ # Euler method
247
+ x = x + d * dt
248
+ else:
249
+ # Heun's method
250
+ x_2 = x + d * dt
251
+ denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
252
+ d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
253
+ d_prime = (d + d_2) / 2
254
+ x = x + d_prime * dt
255
+ return x
256
+
257
+
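+ # The Heun branch above is the explicit trapezoidal rule: predict x_2 with an Euler step,
+ # re-evaluate the slope at (x_2, sigma_{i+1}), then advance with the averaged slope
+ # (d + d_2) / 2. This is second-order accurate at two model calls per step and falls back
+ # to plain Euler on the final step, where sigma_{i+1} == 0 makes the correction undefined.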
258
+ @torch.no_grad()
259
+ def sample_dpm_2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
260
+ """A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
261
+ extra_args = {} if extra_args is None else extra_args
262
+ s_in = x.new_ones([x.shape[0]])
263
+ for i in trange(len(sigmas) - 1, disable=disable):
264
+ if s_churn > 0:
265
+ gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
266
+ sigma_hat = sigmas[i] * (gamma + 1)
267
+ else:
268
+ gamma = 0
269
+ sigma_hat = sigmas[i]
270
+
271
+ if gamma > 0:
272
+ eps = torch.randn_like(x) * s_noise
273
+ x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
274
+ denoised = model(x, sigma_hat * s_in, **extra_args)
275
+ d = to_d(x, sigma_hat, denoised)
276
+ if callback is not None:
277
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
278
+ if sigmas[i + 1] == 0:
279
+ # Euler method
280
+ dt = sigmas[i + 1] - sigma_hat
281
+ x = x + d * dt
282
+ else:
283
+ # DPM-Solver-2
284
+ sigma_mid = sigma_hat.log().lerp(sigmas[i + 1].log(), 0.5).exp()
285
+ dt_1 = sigma_mid - sigma_hat
286
+ dt_2 = sigmas[i + 1] - sigma_hat
287
+ x_2 = x + d * dt_1
288
+ denoised_2 = model(x_2, sigma_mid * s_in, **extra_args)
289
+ d_2 = to_d(x_2, sigma_mid, denoised_2)
290
+ x = x + d_2 * dt_2
291
+ return x
292
+
293
+
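+ # sigma_mid above is the midpoint in log-sigma, i.e. the geometric mean:
+ #     sigma_mid = exp((log(sigma_hat) + log(sigma_{i+1})) / 2) = sqrt(sigma_hat * sigma_{i+1})
+ # so DPM-Solver-2 spends its second model call at this midpoint rather than at sigma_{i+1}.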
294
+ @torch.no_grad()
295
+ def sample_dpm_2_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
296
+ """Ancestral sampling with DPM-Solver second-order steps."""
297
+ if isinstance(model.inner_model.inner_model.model_sampling, comfy.model_sampling.CONST):
298
+ return sample_dpm_2_ancestral_RF(model, x, sigmas, extra_args, callback, disable, eta, s_noise, noise_sampler)
299
+
300
+ extra_args = {} if extra_args is None else extra_args
301
+ seed = extra_args.get("seed", None)
302
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
303
+ s_in = x.new_ones([x.shape[0]])
304
+ for i in trange(len(sigmas) - 1, disable=disable):
305
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
306
+ sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
307
+ if callback is not None:
308
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
309
+ d = to_d(x, sigmas[i], denoised)
310
+ if sigma_down == 0:
311
+ # Euler method
312
+ dt = sigma_down - sigmas[i]
313
+ x = x + d * dt
314
+ else:
315
+ # DPM-Solver-2
316
+ sigma_mid = sigmas[i].log().lerp(sigma_down.log(), 0.5).exp()
317
+ dt_1 = sigma_mid - sigmas[i]
318
+ dt_2 = sigma_down - sigmas[i]
319
+ x_2 = x + d * dt_1
320
+ denoised_2 = model(x_2, sigma_mid * s_in, **extra_args)
321
+ d_2 = to_d(x_2, sigma_mid, denoised_2)
322
+ x = x + d_2 * dt_2
323
+ x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
324
+ return x
325
+
326
+ @torch.no_grad()
327
+ def sample_dpm_2_ancestral_RF(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
328
+ """Ancestral sampling with DPM-Solver second-order steps."""
329
+ extra_args = {} if extra_args is None else extra_args
330
+ seed = extra_args.get("seed", None)
331
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
332
+ s_in = x.new_ones([x.shape[0]])
333
+ for i in trange(len(sigmas) - 1, disable=disable):
334
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
335
+ downstep_ratio = 1 + (sigmas[i+1]/sigmas[i] - 1) * eta
336
+ sigma_down = sigmas[i+1] * downstep_ratio
337
+ alpha_ip1 = 1 - sigmas[i+1]
338
+ alpha_down = 1 - sigma_down
339
+ renoise_coeff = (sigmas[i+1]**2 - sigma_down**2*alpha_ip1**2/alpha_down**2)**0.5
340
+
341
+ if callback is not None:
342
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
343
+ d = to_d(x, sigmas[i], denoised)
344
+ if sigma_down == 0:
345
+ # Euler method
346
+ dt = sigma_down - sigmas[i]
347
+ x = x + d * dt
348
+ else:
349
+ # DPM-Solver-2
350
+ sigma_mid = sigmas[i].log().lerp(sigma_down.log(), 0.5).exp()
351
+ dt_1 = sigma_mid - sigmas[i]
352
+ dt_2 = sigma_down - sigmas[i]
353
+ x_2 = x + d * dt_1
354
+ denoised_2 = model(x_2, sigma_mid * s_in, **extra_args)
355
+ d_2 = to_d(x_2, sigma_mid, denoised_2)
356
+ x = x + d_2 * dt_2
357
+ x = (alpha_ip1/alpha_down) * x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * renoise_coeff
358
+ return x
359
+
360
+ def linear_multistep_coeff(order, t, i, j):
361
+ if order - 1 > i:
362
+ raise ValueError(f'Order {order} too high for step {i}')
363
+ def fn(tau):
364
+ prod = 1.
365
+ for k in range(order):
366
+ if j == k:
367
+ continue
368
+ prod *= (tau - t[i - k]) / (t[i - j] - t[i - k])
369
+ return prod
370
+ return integrate.quad(fn, t[i], t[i + 1], epsrel=1e-4)[0]
371
+
372
+
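+ # On a uniform grid the quadrature above reproduces the classical Adams-Bashforth weights.
+ # A quick sanity check (illustrative; assumes `import numpy as np`):
+ #
+ #     t = np.linspace(1.0, 0.0, 5)                            # step h = -0.25
+ #     [linear_multistep_coeff(2, t, 1, j) for j in range(2)]  # [-0.375, 0.125] == h * [3/2, -1/2]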
373
+ @torch.no_grad()
374
+ def sample_lms(model, x, sigmas, extra_args=None, callback=None, disable=None, order=4):
375
+ extra_args = {} if extra_args is None else extra_args
376
+ s_in = x.new_ones([x.shape[0]])
377
+ sigmas_cpu = sigmas.detach().cpu().numpy()
378
+ ds = []
379
+ for i in trange(len(sigmas) - 1, disable=disable):
380
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
381
+ d = to_d(x, sigmas[i], denoised)
382
+ ds.append(d)
383
+ if len(ds) > order:
384
+ ds.pop(0)
385
+ if callback is not None:
386
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
387
+ cur_order = min(i + 1, order)
388
+ coeffs = [linear_multistep_coeff(cur_order, sigmas_cpu, i, j) for j in range(cur_order)]
389
+ x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
390
+ return x
391
+
392
+
393
+ class PIDStepSizeController:
394
+ """A PID controller for ODE adaptive step size control."""
395
+ def __init__(self, h, pcoeff, icoeff, dcoeff, order=1, accept_safety=0.81, eps=1e-8):
396
+ self.h = h
397
+ self.b1 = (pcoeff + icoeff + dcoeff) / order
398
+ self.b2 = -(pcoeff + 2 * dcoeff) / order
399
+ self.b3 = dcoeff / order
400
+ self.accept_safety = accept_safety
401
+ self.eps = eps
402
+ self.errs = []
403
+
404
+ def limiter(self, x):
405
+ return 1 + math.atan(x - 1)
406
+
407
+ def propose_step(self, error):
408
+ inv_error = 1 / (float(error) + self.eps)
409
+ if not self.errs:
410
+ self.errs = [inv_error, inv_error, inv_error]
411
+ self.errs[0] = inv_error
412
+ factor = self.errs[0] ** self.b1 * self.errs[1] ** self.b2 * self.errs[2] ** self.b3
413
+ factor = self.limiter(factor)
414
+ accept = factor >= self.accept_safety
415
+ if accept:
416
+ self.errs[2] = self.errs[1]
417
+ self.errs[1] = self.errs[0]
418
+ self.h *= factor
419
+ return accept
420
+
421
+
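+ # A minimal sketch of the accept/reject loop this controller is meant to drive
+ # (illustrative; `try_step` and `estimate_error` are hypothetical callables supplied
+ # by the ODE solver):
+ def _example_pid_loop(try_step, estimate_error, state, h_init=0.05, n_steps=10):
+     pid = PIDStepSizeController(h_init, pcoeff=0., icoeff=1., dcoeff=0.)
+     for _ in range(n_steps):
+         candidate = try_step(state, pid.h)  # attempt a step of size pid.h
+         if pid.propose_step(estimate_error(state, candidate)):
+             state = candidate  # accepted; pid.h has already been rescaled for the next step
+         # on rejection, retry from the same state with the shrunken pid.h
+     return state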
422
+ class DPMSolver(nn.Module):
423
+ """DPM-Solver. See https://arxiv.org/abs/2206.00927."""
424
+
425
+ def __init__(self, model, extra_args=None, eps_callback=None, info_callback=None):
426
+ super().__init__()
427
+ self.model = model
428
+ self.extra_args = {} if extra_args is None else extra_args
429
+ self.eps_callback = eps_callback
430
+ self.info_callback = info_callback
431
+
432
+ def t(self, sigma):
433
+ return -sigma.log()
434
+
435
+ def sigma(self, t):
436
+ return t.neg().exp()
437
+
438
+ def eps(self, eps_cache, key, x, t, *args, **kwargs):
439
+ if key in eps_cache:
440
+ return eps_cache[key], eps_cache
441
+ sigma = self.sigma(t) * x.new_ones([x.shape[0]])
442
+ eps = (x - self.model(x, sigma, *args, **self.extra_args, **kwargs)) / self.sigma(t)
443
+ if self.eps_callback is not None:
444
+ self.eps_callback()
445
+ return eps, {key: eps, **eps_cache}
446
+
447
+ def dpm_solver_1_step(self, x, t, t_next, eps_cache=None):
448
+ eps_cache = {} if eps_cache is None else eps_cache
449
+ h = t_next - t
450
+ eps, eps_cache = self.eps(eps_cache, 'eps', x, t)
451
+ x_1 = x - self.sigma(t_next) * h.expm1() * eps
452
+ return x_1, eps_cache
453
+
454
+ def dpm_solver_2_step(self, x, t, t_next, r1=1 / 2, eps_cache=None):
455
+ eps_cache = {} if eps_cache is None else eps_cache
456
+ h = t_next - t
457
+ eps, eps_cache = self.eps(eps_cache, 'eps', x, t)
458
+ s1 = t + r1 * h
459
+ u1 = x - self.sigma(s1) * (r1 * h).expm1() * eps
460
+ eps_r1, eps_cache = self.eps(eps_cache, 'eps_r1', u1, s1)
461
+ x_2 = x - self.sigma(t_next) * h.expm1() * eps - self.sigma(t_next) / (2 * r1) * h.expm1() * (eps_r1 - eps)
462
+ return x_2, eps_cache
463
+
464
+ def dpm_solver_3_step(self, x, t, t_next, r1=1 / 3, r2=2 / 3, eps_cache=None):
465
+ eps_cache = {} if eps_cache is None else eps_cache
466
+ h = t_next - t
467
+ eps, eps_cache = self.eps(eps_cache, 'eps', x, t)
468
+ s1 = t + r1 * h
469
+ s2 = t + r2 * h
470
+ u1 = x - self.sigma(s1) * (r1 * h).expm1() * eps
471
+ eps_r1, eps_cache = self.eps(eps_cache, 'eps_r1', u1, s1)
472
+ u2 = x - self.sigma(s2) * (r2 * h).expm1() * eps - self.sigma(s2) * (r2 / r1) * ((r2 * h).expm1() / (r2 * h) - 1) * (eps_r1 - eps)
473
+ eps_r2, eps_cache = self.eps(eps_cache, 'eps_r2', u2, s2)
474
+ x_3 = x - self.sigma(t_next) * h.expm1() * eps - self.sigma(t_next) / r2 * (h.expm1() / h - 1) * (eps_r2 - eps)
475
+ return x_3, eps_cache
476
+
477
+ def dpm_solver_fast(self, x, t_start, t_end, nfe, eta=0., s_noise=1., noise_sampler=None):
478
+ noise_sampler = default_noise_sampler(x, seed=self.extra_args.get("seed", None)) if noise_sampler is None else noise_sampler
479
+ if not (t_end > t_start) and eta:
480
+ raise ValueError('eta must be 0 for reverse sampling')
481
+
482
+ m = math.floor(nfe / 3) + 1
483
+ ts = torch.linspace(t_start, t_end, m + 1, device=x.device)
484
+
485
+ if nfe % 3 == 0:
486
+ orders = [3] * (m - 2) + [2, 1]
487
+ else:
488
+ orders = [3] * (m - 1) + [nfe % 3]
489
+
490
+ for i in range(len(orders)):
491
+ eps_cache = {}
492
+ t, t_next = ts[i], ts[i + 1]
493
+ if eta:
494
+ sd, su = get_ancestral_step(self.sigma(t), self.sigma(t_next), eta)
495
+ t_next_ = torch.minimum(t_end, self.t(sd))
496
+ su = (self.sigma(t_next) ** 2 - self.sigma(t_next_) ** 2) ** 0.5
497
+ else:
498
+ t_next_, su = t_next, 0.
499
+
500
+ eps, eps_cache = self.eps(eps_cache, 'eps', x, t)
501
+ denoised = x - self.sigma(t) * eps
502
+ if self.info_callback is not None:
503
+ self.info_callback({'x': x, 'i': i, 't': ts[i], 't_up': t, 'denoised': denoised})
504
+
505
+ if orders[i] == 1:
506
+ x, eps_cache = self.dpm_solver_1_step(x, t, t_next_, eps_cache=eps_cache)
507
+ elif orders[i] == 2:
508
+ x, eps_cache = self.dpm_solver_2_step(x, t, t_next_, eps_cache=eps_cache)
509
+ else:
510
+ x, eps_cache = self.dpm_solver_3_step(x, t, t_next_, eps_cache=eps_cache)
511
+
512
+ x = x + su * s_noise * noise_sampler(self.sigma(t), self.sigma(t_next))
513
+
514
+ return x
515
+
516
+ def dpm_solver_adaptive(self, x, t_start, t_end, order=3, rtol=0.05, atol=0.0078, h_init=0.05, pcoeff=0., icoeff=1., dcoeff=0., accept_safety=0.81, eta=0., s_noise=1., noise_sampler=None):
517
+ noise_sampler = default_noise_sampler(x, seed=self.extra_args.get("seed", None)) if noise_sampler is None else noise_sampler
518
+ if order not in {2, 3}:
519
+ raise ValueError('order should be 2 or 3')
520
+ forward = t_end > t_start
521
+ if not forward and eta:
522
+ raise ValueError('eta must be 0 for reverse sampling')
523
+ h_init = abs(h_init) * (1 if forward else -1)
524
+ atol = torch.tensor(atol)
525
+ rtol = torch.tensor(rtol)
526
+ s = t_start
527
+ x_prev = x
528
+ accept = True
529
+ pid = PIDStepSizeController(h_init, pcoeff, icoeff, dcoeff, 1.5 if eta else order, accept_safety)
530
+ info = {'steps': 0, 'nfe': 0, 'n_accept': 0, 'n_reject': 0}
531
+
532
+ while (s < t_end - 1e-5) if forward else (s > t_end + 1e-5):
533
+ eps_cache = {}
534
+ t = torch.minimum(t_end, s + pid.h) if forward else torch.maximum(t_end, s + pid.h)
535
+ if eta:
536
+ sd, su = get_ancestral_step(self.sigma(s), self.sigma(t), eta)
537
+ t_ = torch.minimum(t_end, self.t(sd))
538
+ su = (self.sigma(t) ** 2 - self.sigma(t_) ** 2) ** 0.5
539
+ else:
540
+ t_, su = t, 0.
541
+
542
+ eps, eps_cache = self.eps(eps_cache, 'eps', x, s)
543
+ denoised = x - self.sigma(s) * eps
544
+
545
+ if order == 2:
546
+ x_low, eps_cache = self.dpm_solver_1_step(x, s, t_, eps_cache=eps_cache)
547
+ x_high, eps_cache = self.dpm_solver_2_step(x, s, t_, eps_cache=eps_cache)
548
+ else:
549
+ x_low, eps_cache = self.dpm_solver_2_step(x, s, t_, r1=1 / 3, eps_cache=eps_cache)
550
+ x_high, eps_cache = self.dpm_solver_3_step(x, s, t_, eps_cache=eps_cache)
551
+ delta = torch.maximum(atol, rtol * torch.maximum(x_low.abs(), x_prev.abs()))
552
+ error = torch.linalg.norm((x_low - x_high) / delta) / x.numel() ** 0.5
553
+ accept = pid.propose_step(error)
554
+ if accept:
555
+ x_prev = x_low
556
+ x = x_high + su * s_noise * noise_sampler(self.sigma(s), self.sigma(t))
557
+ s = t
558
+ info['n_accept'] += 1
559
+ else:
560
+ info['n_reject'] += 1
561
+ info['nfe'] += order
562
+ info['steps'] += 1
563
+
564
+ if self.info_callback is not None:
565
+ self.info_callback({'x': x, 'i': info['steps'] - 1, 't': s, 't_up': s, 'denoised': denoised, 'error': error, 'h': pid.h, **info})
566
+
567
+ return x, info
568
+
569
+
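+ # DPMSolver integrates in t = -log(sigma) time: with sigma(t) = exp(-t) the probability-flow
+ # ODE becomes an exponential-integrator problem in the predicted noise eps, and the order-k
+ # steps above are exact whenever eps varies polynomially (degree k - 1) in t across the step.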
570
+ @torch.no_grad()
571
+ def sample_dpm_fast(model, x, sigma_min, sigma_max, n, extra_args=None, callback=None, disable=None, eta=0., s_noise=1., noise_sampler=None):
572
+ """DPM-Solver-Fast (fixed step size). See https://arxiv.org/abs/2206.00927."""
573
+ if sigma_min <= 0 or sigma_max <= 0:
574
+ raise ValueError('sigma_min and sigma_max must not be 0')
575
+ with tqdm(total=n, disable=disable) as pbar:
576
+ dpm_solver = DPMSolver(model, extra_args, eps_callback=pbar.update)
577
+ if callback is not None:
578
+ dpm_solver.info_callback = lambda info: callback({'sigma': dpm_solver.sigma(info['t']), 'sigma_hat': dpm_solver.sigma(info['t_up']), **info})
579
+ return dpm_solver.dpm_solver_fast(x, dpm_solver.t(torch.tensor(sigma_max)), dpm_solver.t(torch.tensor(sigma_min)), n, eta, s_noise, noise_sampler)
580
+
581
+
582
+ @torch.no_grad()
583
+ def sample_dpm_adaptive(model, x, sigma_min, sigma_max, extra_args=None, callback=None, disable=None, order=3, rtol=0.05, atol=0.0078, h_init=0.05, pcoeff=0., icoeff=1., dcoeff=0., accept_safety=0.81, eta=0., s_noise=1., noise_sampler=None, return_info=False):
584
+ """DPM-Solver-12 and 23 (adaptive step size). See https://arxiv.org/abs/2206.00927."""
585
+ if sigma_min <= 0 or sigma_max <= 0:
586
+ raise ValueError('sigma_min and sigma_max must not be 0')
587
+ with tqdm(disable=disable) as pbar:
588
+ dpm_solver = DPMSolver(model, extra_args, eps_callback=pbar.update)
589
+ if callback is not None:
590
+ dpm_solver.info_callback = lambda info: callback({'sigma': dpm_solver.sigma(info['t']), 'sigma_hat': dpm_solver.sigma(info['t_up']), **info})
591
+ x, info = dpm_solver.dpm_solver_adaptive(x, dpm_solver.t(torch.tensor(sigma_max)), dpm_solver.t(torch.tensor(sigma_min)), order, rtol, atol, h_init, pcoeff, icoeff, dcoeff, accept_safety, eta, s_noise, noise_sampler)
592
+ if return_info:
593
+ return x, info
594
+ return x
595
+
596
+
597
+ @torch.no_grad()
598
+ def sample_dpmpp_2s_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
599
+ """Ancestral sampling with DPM-Solver++(2S) second-order steps."""
600
+ if isinstance(model.inner_model.inner_model.model_sampling, comfy.model_sampling.CONST):
601
+ return sample_dpmpp_2s_ancestral_RF(model, x, sigmas, extra_args, callback, disable, eta, s_noise, noise_sampler)
602
+
603
+ extra_args = {} if extra_args is None else extra_args
604
+ seed = extra_args.get("seed", None)
605
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
606
+ s_in = x.new_ones([x.shape[0]])
607
+ sigma_fn = lambda t: t.neg().exp()
608
+ t_fn = lambda sigma: sigma.log().neg()
609
+
610
+ for i in trange(len(sigmas) - 1, disable=disable):
611
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
612
+ sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
613
+ if callback is not None:
614
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
615
+ if sigma_down == 0:
616
+ # Euler method
617
+ d = to_d(x, sigmas[i], denoised)
618
+ dt = sigma_down - sigmas[i]
619
+ x = x + d * dt
620
+ else:
621
+ # DPM-Solver++(2S)
622
+ t, t_next = t_fn(sigmas[i]), t_fn(sigma_down)
623
+ r = 1 / 2
624
+ h = t_next - t
625
+ s = t + r * h
626
+ x_2 = (sigma_fn(s) / sigma_fn(t)) * x - (-h * r).expm1() * denoised
627
+ denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)
628
+ x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_2
629
+ # Noise addition
630
+ if sigmas[i + 1] > 0:
631
+ x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
632
+ return x
633
+
634
+
635
+ @torch.no_grad()
636
+ def sample_dpmpp_2s_ancestral_RF(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
637
+ """Ancestral sampling with DPM-Solver++(2S) second-order steps."""
638
+ extra_args = {} if extra_args is None else extra_args
639
+ seed = extra_args.get("seed", None)
640
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
641
+ s_in = x.new_ones([x.shape[0]])
642
+ sigma_fn = lambda lbda: (lbda.exp() + 1) ** -1
643
+ lambda_fn = lambda sigma: ((1-sigma)/sigma).log()
644
+
645
+ # logged_x = x.unsqueeze(0)
646
+
647
+ for i in trange(len(sigmas) - 1, disable=disable):
648
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
649
+ downstep_ratio = 1 + (sigmas[i+1]/sigmas[i] - 1) * eta
650
+ sigma_down = sigmas[i+1] * downstep_ratio
651
+ alpha_ip1 = 1 - sigmas[i+1]
652
+ alpha_down = 1 - sigma_down
653
+ renoise_coeff = (sigmas[i+1]**2 - sigma_down**2*alpha_ip1**2/alpha_down**2)**0.5
654
+ # sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
655
+ if callback is not None:
656
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
657
+ if sigmas[i + 1] == 0:
658
+ # Euler method
659
+ d = to_d(x, sigmas[i], denoised)
660
+ dt = sigma_down - sigmas[i]
661
+ x = x + d * dt
662
+ else:
663
+ # DPM-Solver++(2S)
664
+ if sigmas[i] == 1.0:
665
+ sigma_s = 0.9999
666
+ else:
667
+ t_i, t_down = lambda_fn(sigmas[i]), lambda_fn(sigma_down)
668
+ r = 1 / 2
669
+ h = t_down - t_i
670
+ s = t_i + r * h
671
+ sigma_s = sigma_fn(s)
672
+ # sigma_s = sigmas[i+1]
673
+ sigma_s_i_ratio = sigma_s / sigmas[i]
674
+ u = sigma_s_i_ratio * x + (1 - sigma_s_i_ratio) * denoised
675
+ D_i = model(u, sigma_s * s_in, **extra_args)
676
+ sigma_down_i_ratio = sigma_down / sigmas[i]
677
+ x = sigma_down_i_ratio * x + (1 - sigma_down_i_ratio) * D_i
678
+ # print("sigma_i", sigmas[i], "sigma_ip1", sigmas[i+1],"sigma_down", sigma_down, "sigma_down_i_ratio", sigma_down_i_ratio, "sigma_s_i_ratio", sigma_s_i_ratio, "renoise_coeff", renoise_coeff)
679
+ # Noise addition
680
+ if sigmas[i + 1] > 0 and eta > 0:
681
+ x = (alpha_ip1/alpha_down) * x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * renoise_coeff
682
+ # logged_x = torch.cat((logged_x, x.unsqueeze(0)), dim=0)
683
+ return x
684
+
685
+ @torch.no_grad()
686
+ def sample_dpmpp_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=1 / 2):
687
+ """DPM-Solver++ (stochastic)."""
688
+ if len(sigmas) <= 1:
689
+ return x
690
+
691
+ sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
692
+ extra_args = {} if extra_args is None else extra_args
693
+ seed = extra_args.get("seed", None)
694
+ noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
695
+ s_in = x.new_ones([x.shape[0]])
696
+ sigma_fn = lambda t: t.neg().exp()
697
+ t_fn = lambda sigma: sigma.log().neg()
698
+
699
+ for i in trange(len(sigmas) - 1, disable=disable):
700
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
701
+ if callback is not None:
702
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
703
+ if sigmas[i + 1] == 0:
704
+ # Euler method
705
+ d = to_d(x, sigmas[i], denoised)
706
+ dt = sigmas[i + 1] - sigmas[i]
707
+ x = x + d * dt
708
+ else:
709
+ # DPM-Solver++
710
+ t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
711
+ h = t_next - t
712
+ s = t + h * r
713
+ fac = 1 / (2 * r)
714
+
715
+ # Step 1
716
+ sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(s), eta)
717
+ s_ = t_fn(sd)
718
+ x_2 = (sigma_fn(s_) / sigma_fn(t)) * x - (t - s_).expm1() * denoised
719
+ x_2 = x_2 + noise_sampler(sigma_fn(t), sigma_fn(s)) * s_noise * su
720
+ denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)
721
+
722
+ # Step 2
723
+ sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(t_next), eta)
724
+ t_next_ = t_fn(sd)
725
+ denoised_d = (1 - fac) * denoised + fac * denoised_2
726
+ x = (sigma_fn(t_next_) / sigma_fn(t)) * x - (t - t_next_).expm1() * denoised_d
727
+ x = x + noise_sampler(sigma_fn(t), sigma_fn(t_next)) * s_noise * su
728
+ return x
729
+
730
+
731
+ @torch.no_grad()
732
+ def sample_dpmpp_2m(model, x, sigmas, extra_args=None, callback=None, disable=None):
733
+ """DPM-Solver++(2M)."""
734
+ extra_args = {} if extra_args is None else extra_args
735
+ s_in = x.new_ones([x.shape[0]])
736
+ sigma_fn = lambda t: t.neg().exp()
737
+ t_fn = lambda sigma: sigma.log().neg()
738
+ old_denoised = None
739
+
740
+ for i in trange(len(sigmas) - 1, disable=disable):
741
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
742
+ if callback is not None:
743
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
744
+ t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
745
+ h = t_next - t
746
+ if old_denoised is None or sigmas[i + 1] == 0:
747
+ x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised
748
+ else:
749
+ h_last = t - t_fn(sigmas[i - 1])
750
+ r = h_last / h
751
+ denoised_d = (1 + 1 / (2 * r)) * denoised - (1 / (2 * r)) * old_denoised
752
+ x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_d
753
+ old_denoised = denoised
754
+ return x
755
+
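+ # The multistep branch above extrapolates the denoised prediction linearly in t using the
+ # previous model output:
+ #     denoised_d = (1 + 1 / (2 * r)) * denoised - (1 / (2 * r)) * old_denoised,  r = h_last / h
+ # which yields a second-order step at only one model call per iteration.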
756
+ @torch.no_grad()
757
+ def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint'):
758
+ """DPM-Solver++(2M) SDE."""
759
+ if len(sigmas) <= 1:
760
+ return x
761
+
762
+ if solver_type not in {'heun', 'midpoint'}:
763
+ raise ValueError('solver_type must be \'heun\' or \'midpoint\'')
764
+
765
+ extra_args = {} if extra_args is None else extra_args
766
+ seed = extra_args.get("seed", None)
767
+ sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
768
+ noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
769
+ s_in = x.new_ones([x.shape[0]])
770
+
771
+ old_denoised = None
772
+ h_last = None
773
+ h = None
774
+
775
+ for i in trange(len(sigmas) - 1, disable=disable):
776
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
777
+ if callback is not None:
778
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
779
+ if sigmas[i + 1] == 0:
780
+ # Denoising step
781
+ x = denoised
782
+ else:
783
+ # DPM-Solver++(2M) SDE
784
+ t, s = -sigmas[i].log(), -sigmas[i + 1].log()
785
+ h = s - t
786
+ eta_h = eta * h
787
+
788
+ x = sigmas[i + 1] / sigmas[i] * (-eta_h).exp() * x + (-h - eta_h).expm1().neg() * denoised
789
+
790
+ if old_denoised is not None:
791
+ r = h_last / h
792
+ if solver_type == 'heun':
793
+ x = x + ((-h - eta_h).expm1().neg() / (-h - eta_h) + 1) * (1 / r) * (denoised - old_denoised)
794
+ elif solver_type == 'midpoint':
795
+ x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised)
796
+
797
+ if eta:
798
+ x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise
799
+
800
+ old_denoised = denoised
801
+ h_last = h
802
+ return x
803
+
804
+ @torch.no_grad()
805
+ def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
806
+ """DPM-Solver++(3M) SDE."""
807
+
808
+ if len(sigmas) <= 1:
809
+ return x
810
+
811
+ extra_args = {} if extra_args is None else extra_args
812
+ seed = extra_args.get("seed", None)
813
+ sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
814
+ noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
815
+ s_in = x.new_ones([x.shape[0]])
816
+
817
+ denoised_1, denoised_2 = None, None
818
+ h, h_1, h_2 = None, None, None
819
+
820
+ for i in trange(len(sigmas) - 1, disable=disable):
821
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
822
+ if callback is not None:
823
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
824
+ if sigmas[i + 1] == 0:
825
+ # Denoising step
826
+ x = denoised
827
+ else:
828
+ t, s = -sigmas[i].log(), -sigmas[i + 1].log()
829
+ h = s - t
830
+ h_eta = h * (eta + 1)
831
+
832
+ x = torch.exp(-h_eta) * x + (-h_eta).expm1().neg() * denoised
833
+
834
+ if h_2 is not None:
835
+ r0 = h_1 / h
836
+ r1 = h_2 / h
837
+ d1_0 = (denoised - denoised_1) / r0
838
+ d1_1 = (denoised_1 - denoised_2) / r1
839
+ d1 = d1_0 + (d1_0 - d1_1) * r0 / (r0 + r1)
840
+ d2 = (d1_0 - d1_1) / (r0 + r1)
841
+ phi_2 = h_eta.neg().expm1() / h_eta + 1
842
+ phi_3 = phi_2 / h_eta - 0.5
843
+ x = x + phi_2 * d1 - phi_3 * d2
844
+ elif h_1 is not None:
845
+ r = h_1 / h
846
+ d = (denoised - denoised_1) / r
847
+ phi_2 = h_eta.neg().expm1() / h_eta + 1
848
+ x = x + phi_2 * d
849
+
850
+ if eta:
851
+ x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * h * eta).expm1().neg().sqrt() * s_noise
852
+
853
+ denoised_1, denoised_2 = denoised, denoised_1
854
+ h_1, h_2 = h, h_1
855
+ return x
856
+
857
+ @torch.no_grad()
858
+ def sample_dpmpp_3m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
859
+ if len(sigmas) <= 1:
860
+ return x
861
+
862
+ sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
863
+ noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=(extra_args or {}).get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
864
+ return sample_dpmpp_3m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler)
865
+
866
+ @torch.no_grad()
867
+ def sample_dpmpp_2m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint'):
868
+ if len(sigmas) <= 1:
869
+ return x
870
+
871
+ sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
872
+ noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=(extra_args or {}).get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
873
+ return sample_dpmpp_2m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, solver_type=solver_type)
874
+
875
+ @torch.no_grad()
876
+ def sample_dpmpp_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=1 / 2):
877
+ if len(sigmas) <= 1:
878
+ return x
879
+
880
+ sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
881
+ noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=(extra_args or {}).get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
882
+ return sample_dpmpp_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, r=r)
883
+
884
+
885
+ def DDPMSampler_step(x, sigma, sigma_prev, noise, noise_sampler):
886
+ alpha_cumprod = 1 / ((sigma * sigma) + 1)
887
+ alpha_cumprod_prev = 1 / ((sigma_prev * sigma_prev) + 1)
888
+ alpha = (alpha_cumprod / alpha_cumprod_prev)
889
+
890
+ mu = (1.0 / alpha).sqrt() * (x - (1 - alpha) * noise / (1 - alpha_cumprod).sqrt())
891
+ if sigma_prev > 0:
892
+ mu += ((1 - alpha) * (1. - alpha_cumprod_prev) / (1. - alpha_cumprod)).sqrt() * noise_sampler(sigma, sigma_prev)
893
+ return mu
894
+
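+ # DDPMSampler_step converts between the sigma parametrization used in this file and DDPM's
+ # alphas via alpha_cumprod = 1 / (sigma**2 + 1) (equivalently sigma = sqrt((1 - ac) / ac));
+ # mu is then the standard DDPM posterior mean, with the posterior-variance noise term added
+ # whenever sigma_prev > 0.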
895
+ def generic_step_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, step_function=None):
896
+ extra_args = {} if extra_args is None else extra_args
897
+ seed = extra_args.get("seed", None)
898
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
899
+ s_in = x.new_ones([x.shape[0]])
900
+
901
+ for i in trange(len(sigmas) - 1, disable=disable):
902
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
903
+ if callback is not None:
904
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
905
+ x = step_function(x / torch.sqrt(1.0 + sigmas[i] ** 2.0), sigmas[i], sigmas[i + 1], (x - denoised) / sigmas[i], noise_sampler)
906
+ if sigmas[i + 1] != 0:
907
+ x *= torch.sqrt(1.0 + sigmas[i + 1] ** 2.0)
908
+ return x
909
+
910
+
911
+ @torch.no_grad()
912
+ def sample_ddpm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
913
+ return generic_step_sampler(model, x, sigmas, extra_args, callback, disable, noise_sampler, DDPMSampler_step)
914
+
915
+ @torch.no_grad()
916
+ def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
917
+ extra_args = {} if extra_args is None else extra_args
918
+ seed = extra_args.get("seed", None)
919
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
920
+ s_in = x.new_ones([x.shape[0]])
921
+ for i in trange(len(sigmas) - 1, disable=disable):
922
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
923
+ if callback is not None:
924
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
925
+
926
+ x = denoised
927
+ if sigmas[i + 1] > 0:
928
+ x = model.inner_model.inner_model.model_sampling.noise_scaling(sigmas[i + 1], noise_sampler(sigmas[i], sigmas[i + 1]), x)
929
+ return x
930
+
931
+
932
+
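+ # sample_lcm follows latent consistency models: each iteration jumps directly to the model's
+ # x0 prediction and, if steps remain, renoises it to the next sigma through the model's own
+ # noise_scaling rule instead of integrating an ODE trajectory.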
933
+ @torch.no_grad()
934
+ def sample_heunpp2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):
935
+ # From MIT licensed: https://github.com/Carzit/sd-webui-samplers-scheduler/
936
+ extra_args = {} if extra_args is None else extra_args
937
+ s_in = x.new_ones([x.shape[0]])
938
+ s_end = sigmas[-1]
939
+ for i in trange(len(sigmas) - 1, disable=disable):
940
+ gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.
941
+ eps = torch.randn_like(x) * s_noise
942
+ sigma_hat = sigmas[i] * (gamma + 1)
943
+ if gamma > 0:
944
+ x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5
945
+ denoised = model(x, sigma_hat * s_in, **extra_args)
946
+ d = to_d(x, sigma_hat, denoised)
947
+ if callback is not None:
948
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
949
+ dt = sigmas[i + 1] - sigma_hat
950
+ if sigmas[i + 1] == s_end:
951
+ # Euler method
952
+ x = x + d * dt
953
+ elif sigmas[i + 2] == s_end:
954
+
955
+ # Heun's method
956
+ x_2 = x + d * dt
957
+ denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
958
+ d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
959
+
960
+ w = 2 * sigmas[0]
961
+ w2 = sigmas[i+1]/w
962
+ w1 = 1 - w2
963
+
964
+ d_prime = d * w1 + d_2 * w2
965
+
966
+
967
+ x = x + d_prime * dt
968
+
969
+ else:
970
+ # Heun++
971
+ x_2 = x + d * dt
972
+ denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
973
+ d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
974
+ dt_2 = sigmas[i + 2] - sigmas[i + 1]
975
+
976
+ x_3 = x_2 + d_2 * dt_2
977
+ denoised_3 = model(x_3, sigmas[i + 2] * s_in, **extra_args)
978
+ d_3 = to_d(x_3, sigmas[i + 2], denoised_3)
979
+
980
+ w = 3 * sigmas[0]
981
+ w2 = sigmas[i + 1] / w
982
+ w3 = sigmas[i + 2] / w
983
+ w1 = 1 - w2 - w3
984
+
985
+ d_prime = w1 * d + w2 * d_2 + w3 * d_3
986
+ x = x + d_prime * dt
987
+ return x
988
+
989
+
990
+ #From https://github.com/zju-pi/diff-sampler/blob/main/diff-solvers-main/solvers.py
991
+ #under Apache 2 license
992
+ @torch.no_grad()
+ def sample_ipndm(model, x, sigmas, extra_args=None, callback=None, disable=None, max_order=4):
993
+ extra_args = {} if extra_args is None else extra_args
994
+ s_in = x.new_ones([x.shape[0]])
995
+
996
+ x_next = x
997
+
998
+ buffer_model = []
999
+ for i in trange(len(sigmas) - 1, disable=disable):
1000
+ t_cur = sigmas[i]
1001
+ t_next = sigmas[i + 1]
1002
+
1003
+ x_cur = x_next
1004
+
1005
+ denoised = model(x_cur, t_cur * s_in, **extra_args)
1006
+ if callback is not None:
1007
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
1008
+
1009
+ d_cur = (x_cur - denoised) / t_cur
1010
+
1011
+ order = min(max_order, i+1)
1012
+ if order == 1: # First Euler step.
1013
+ x_next = x_cur + (t_next - t_cur) * d_cur
1014
+ elif order == 2: # Use one history point.
1015
+ x_next = x_cur + (t_next - t_cur) * (3 * d_cur - buffer_model[-1]) / 2
1016
+ elif order == 3: # Use two history points.
1017
+ x_next = x_cur + (t_next - t_cur) * (23 * d_cur - 16 * buffer_model[-1] + 5 * buffer_model[-2]) / 12
1018
+ elif order == 4: # Use three history points.
1019
+ x_next = x_cur + (t_next - t_cur) * (55 * d_cur - 59 * buffer_model[-1] + 37 * buffer_model[-2] - 9 * buffer_model[-3]) / 24
1020
+
1021
+ if len(buffer_model) == max_order - 1:
1022
+ for k in range(max_order - 2):
1023
+ buffer_model[k] = buffer_model[k+1]
1024
+ buffer_model[-1] = d_cur
1025
+ else:
1026
+ buffer_model.append(d_cur)
1027
+
1028
+ return x_next
1029
+
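+ # The fixed coefficients above are the classical Adams-Bashforth weights for a uniform grid:
+ # (3, -1) / 2 at order 2, (23, -16, 5) / 12 at order 3, and (55, -59, 37, -9) / 24 at order 4.
+ # sample_ipndm_v below rederives them for non-uniform sigma spacing.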
1030
+ #From https://github.com/zju-pi/diff-sampler/blob/main/diff-solvers-main/solvers.py
1031
+ #under Apache 2 license
1032
+ @torch.no_grad()
+ def sample_ipndm_v(model, x, sigmas, extra_args=None, callback=None, disable=None, max_order=4):
1033
+ extra_args = {} if extra_args is None else extra_args
1034
+ s_in = x.new_ones([x.shape[0]])
1035
+
1036
+ x_next = x
1037
+ t_steps = sigmas
1038
+
1039
+ buffer_model = []
1040
+ for i in trange(len(sigmas) - 1, disable=disable):
1041
+ t_cur = sigmas[i]
1042
+ t_next = sigmas[i + 1]
1043
+
1044
+ x_cur = x_next
1045
+
1046
+ denoised = model(x_cur, t_cur * s_in, **extra_args)
1047
+ if callback is not None:
1048
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
1049
+
1050
+ d_cur = (x_cur - denoised) / t_cur
1051
+
1052
+ order = min(max_order, i+1)
1053
+ if order == 1: # First Euler step.
1054
+ x_next = x_cur + (t_next - t_cur) * d_cur
1055
+ elif order == 2: # Use one history point.
1056
+ h_n = (t_next - t_cur)
1057
+ h_n_1 = (t_cur - t_steps[i-1])
1058
+ coeff1 = (2 + (h_n / h_n_1)) / 2
1059
+ coeff2 = -(h_n / h_n_1) / 2
1060
+ x_next = x_cur + (t_next - t_cur) * (coeff1 * d_cur + coeff2 * buffer_model[-1])
1061
+ elif order == 3: # Use two history points.
1062
+ h_n = (t_next - t_cur)
1063
+ h_n_1 = (t_cur - t_steps[i-1])
1064
+ h_n_2 = (t_steps[i-1] - t_steps[i-2])
1065
+ temp = (1 - h_n / (3 * (h_n + h_n_1)) * (h_n * (h_n + h_n_1)) / (h_n_1 * (h_n_1 + h_n_2))) / 2
1066
+ coeff1 = (2 + (h_n / h_n_1)) / 2 + temp
1067
+ coeff2 = -(h_n / h_n_1) / 2 - (1 + h_n_1 / h_n_2) * temp
1068
+ coeff3 = temp * h_n_1 / h_n_2
1069
+ x_next = x_cur + (t_next - t_cur) * (coeff1 * d_cur + coeff2 * buffer_model[-1] + coeff3 * buffer_model[-2])
1070
+ elif order == 4: # Use three history points.
1071
+ h_n = (t_next - t_cur)
1072
+ h_n_1 = (t_cur - t_steps[i-1])
1073
+ h_n_2 = (t_steps[i-1] - t_steps[i-2])
1074
+ h_n_3 = (t_steps[i-2] - t_steps[i-3])
1075
+ temp1 = (1 - h_n / (3 * (h_n + h_n_1)) * (h_n * (h_n + h_n_1)) / (h_n_1 * (h_n_1 + h_n_2))) / 2
1076
+ temp2 = ((1 - h_n / (3 * (h_n + h_n_1))) / 2 + (1 - h_n / (2 * (h_n + h_n_1))) * h_n / (6 * (h_n + h_n_1 + h_n_2))) \
1077
+ * (h_n * (h_n + h_n_1) * (h_n + h_n_1 + h_n_2)) / (h_n_1 * (h_n_1 + h_n_2) * (h_n_1 + h_n_2 + h_n_3))
1078
+ coeff1 = (2 + (h_n / h_n_1)) / 2 + temp1 + temp2
1079
+ coeff2 = -(h_n / h_n_1) / 2 - (1 + h_n_1 / h_n_2) * temp1 - (1 + (h_n_1 / h_n_2) + (h_n_1 * (h_n_1 + h_n_2) / (h_n_2 * (h_n_2 + h_n_3)))) * temp2
1080
+ coeff3 = temp1 * h_n_1 / h_n_2 + ((h_n_1 / h_n_2) + (h_n_1 * (h_n_1 + h_n_2) / (h_n_2 * (h_n_2 + h_n_3))) * (1 + h_n_2 / h_n_3)) * temp2
1081
+ coeff4 = -temp2 * (h_n_1 * (h_n_1 + h_n_2) / (h_n_2 * (h_n_2 + h_n_3))) * h_n_1 / h_n_2
1082
+ x_next = x_cur + (t_next - t_cur) * (coeff1 * d_cur + coeff2 * buffer_model[-1] + coeff3 * buffer_model[-2] + coeff4 * buffer_model[-3])
1083
+
1084
+ if len(buffer_model) == max_order - 1:
1085
+ for k in range(max_order - 2):
1086
+ buffer_model[k] = buffer_model[k+1]
1087
+ buffer_model[-1] = d_cur.detach()
1088
+ else:
1089
+ buffer_model.append(d_cur.detach())
1090
+
1091
+ return x_next
1092
+
1093
+ #From https://github.com/zju-pi/diff-sampler/blob/main/diff-solvers-main/solvers.py
1094
+ #under Apache 2 license
1095
+ @torch.no_grad()
1096
+ def sample_deis(model, x, sigmas, extra_args=None, callback=None, disable=None, max_order=3, deis_mode='tab'):
1097
+ extra_args = {} if extra_args is None else extra_args
1098
+ s_in = x.new_ones([x.shape[0]])
1099
+
1100
+ x_next = x
1101
+ t_steps = sigmas
1102
+
1103
+ coeff_list = deis.get_deis_coeff_list(t_steps, max_order, deis_mode=deis_mode)
1104
+
1105
+ buffer_model = []
1106
+ for i in trange(len(sigmas) - 1, disable=disable):
1107
+ t_cur = sigmas[i]
1108
+ t_next = sigmas[i + 1]
1109
+
1110
+ x_cur = x_next
1111
+
1112
+ denoised = model(x_cur, t_cur * s_in, **extra_args)
1113
+ if callback is not None:
1114
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
1115
+
1116
+ d_cur = (x_cur - denoised) / t_cur
1117
+
1118
+ order = min(max_order, i+1)
1119
+ if t_next <= 0:
1120
+ order = 1
1121
+
1122
+ if order == 1: # First Euler step.
1123
+ x_next = x_cur + (t_next - t_cur) * d_cur
1124
+ elif order == 2: # Use one history point.
1125
+ coeff_cur, coeff_prev1 = coeff_list[i]
1126
+ x_next = x_cur + coeff_cur * d_cur + coeff_prev1 * buffer_model[-1]
1127
+ elif order == 3: # Use two history points.
1128
+ coeff_cur, coeff_prev1, coeff_prev2 = coeff_list[i]
1129
+ x_next = x_cur + coeff_cur * d_cur + coeff_prev1 * buffer_model[-1] + coeff_prev2 * buffer_model[-2]
1130
+ elif order == 4: # Use three history points.
1131
+ coeff_cur, coeff_prev1, coeff_prev2, coeff_prev3 = coeff_list[i]
1132
+ x_next = x_cur + coeff_cur * d_cur + coeff_prev1 * buffer_model[-1] + coeff_prev2 * buffer_model[-2] + coeff_prev3 * buffer_model[-3]
1133
+
1134
+ if len(buffer_model) == max_order - 1:
1135
+ for k in range(max_order - 2):
1136
+ buffer_model[k] = buffer_model[k+1]
1137
+ buffer_model[-1] = d_cur.detach()
1138
+ else:
1139
+ buffer_model.append(d_cur.detach())
1140
+
1141
+ return x_next
1142
+
1143
+ @torch.no_grad()
1144
+ def sample_euler_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None):
1145
+ extra_args = {} if extra_args is None else extra_args
1146
+
1147
+ temp = [0]
1148
+ def post_cfg_function(args):
1149
+ temp[0] = args["uncond_denoised"]
1150
+ return args["denoised"]
1151
+
1152
+ model_options = extra_args.get("model_options", {}).copy()
1153
+ extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)
1154
+
1155
+ s_in = x.new_ones([x.shape[0]])
1156
+ for i in trange(len(sigmas) - 1, disable=disable):
1157
+ sigma_hat = sigmas[i]
1158
+ denoised = model(x, sigma_hat * s_in, **extra_args)
1159
+ d = to_d(x, sigma_hat, temp[0])
1160
+ if callback is not None:
1161
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
1162
+ # Euler method
1163
+ x = denoised + d * sigmas[i + 1]
1164
+ return x
1165
+
1166
+ @torch.no_grad()
1167
+ def sample_euler_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
1168
+ """Ancestral sampling with Euler method steps."""
1169
+ extra_args = {} if extra_args is None else extra_args
1170
+ seed = extra_args.get("seed", None)
1171
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
1172
+
1173
+ temp = [0]
1174
+ def post_cfg_function(args):
1175
+ temp[0] = args["uncond_denoised"]
1176
+ return args["denoised"]
1177
+
1178
+ model_options = extra_args.get("model_options", {}).copy()
1179
+ extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)
1180
+
1181
+ s_in = x.new_ones([x.shape[0]])
1182
+ for i in trange(len(sigmas) - 1, disable=disable):
1183
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
1184
+ sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
1185
+ if callback is not None:
1186
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
1187
+ d = to_d(x, sigmas[i], temp[0])
1188
+ # Euler method
1189
+ x = denoised + d * sigma_down
1190
+ if sigmas[i + 1] > 0:
1191
+ x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
1192
+ return x
1193
+
+ @torch.no_grad()
1194
+ def sample_dpmpp_2s_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
1195
+ """Ancestral sampling with DPM-Solver++(2S) second-order steps."""
1196
+ extra_args = {} if extra_args is None else extra_args
1197
+ seed = extra_args.get("seed", None)
1198
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
1199
+
1200
+ temp = [0]
1201
+ def post_cfg_function(args):
1202
+ temp[0] = args["uncond_denoised"]
1203
+ return args["denoised"]
1204
+
1205
+ model_options = extra_args.get("model_options", {}).copy()
1206
+ extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)
1207
+
1208
+ s_in = x.new_ones([x.shape[0]])
1209
+ sigma_fn = lambda t: t.neg().exp()
1210
+ t_fn = lambda sigma: sigma.log().neg()
1211
+
1212
+ for i in trange(len(sigmas) - 1, disable=disable):
1213
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
1214
+ sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
1215
+ if callback is not None:
1216
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
1217
+ if sigma_down == 0:
1218
+ # Euler method
1219
+ d = to_d(x, sigmas[i], temp[0])
1220
+ x = denoised + d * sigma_down
1221
+ else:
1222
+ # DPM-Solver++(2S)
1223
+ t, t_next = t_fn(sigmas[i]), t_fn(sigma_down)
1224
+ # r = torch.sinh(1 + (2 - eta) * (t_next - t) / (t - t_fn(sigma_up))) only works in the non-CFG++ case, so the fixed midpoint r = 1/2 is used here
1225
+ r = 1 / 2
1226
+ h = t_next - t
1227
+ s = t + r * h
1228
+ x_2 = (sigma_fn(s) / sigma_fn(t)) * (x + (denoised - temp[0])) - (-h * r).expm1() * denoised
1229
+ denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)
1230
+ x = (sigma_fn(t_next) / sigma_fn(t)) * (x + (denoised - temp[0])) - (-h).expm1() * denoised_2
1231
+ # Noise addition
1232
+ if sigmas[i + 1] > 0:
1233
+ x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
1234
+ return x
1235
+
1236
+ @torch.no_grad()
1237
+ def sample_dpmpp_2m_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None):
1238
+ """DPM-Solver++(2M)."""
1239
+ extra_args = {} if extra_args is None else extra_args
1240
+ s_in = x.new_ones([x.shape[0]])
1241
+ t_fn = lambda sigma: sigma.log().neg()
1242
+
1243
+ old_uncond_denoised = None
1244
+ uncond_denoised = None
1245
+ def post_cfg_function(args):
1246
+ nonlocal uncond_denoised
1247
+ uncond_denoised = args["uncond_denoised"]
1248
+ return args["denoised"]
1249
+
1250
+ model_options = extra_args.get("model_options", {}).copy()
1251
+ extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)
1252
+
1253
+ for i in trange(len(sigmas) - 1, disable=disable):
1254
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
1255
+ if callback is not None:
1256
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
1257
+ t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
1258
+ h = t_next - t
1259
+ if old_uncond_denoised is None or sigmas[i + 1] == 0:
1260
+ denoised_mix = -torch.exp(-h) * uncond_denoised
1261
+ else:
1262
+ h_last = t - t_fn(sigmas[i - 1])
1263
+ r = h_last / h
1264
+ denoised_mix = -torch.exp(-h) * uncond_denoised - torch.expm1(-h) * (1 / (2 * r)) * (denoised - old_uncond_denoised)
1265
+ x = denoised + denoised_mix + torch.exp(-h) * x
1266
+ old_uncond_denoised = uncond_denoised
1267
+ return x
1268
+
1269
+ @torch.no_grad()
1270
+ def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., noise_sampler=None, cfg_pp=False):
1271
+ extra_args = {} if extra_args is None else extra_args
1272
+ seed = extra_args.get("seed", None)
1273
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
1274
+ s_in = x.new_ones([x.shape[0]])
1275
+ sigma_fn = lambda t: t.neg().exp()
1276
+ t_fn = lambda sigma: sigma.log().neg()
1277
+ phi1_fn = lambda t: torch.expm1(t) / t
1278
+ phi2_fn = lambda t: (phi1_fn(t) - 1.0) / t
1279
+
1280
+ old_denoised = None
1281
+ uncond_denoised = None
1282
+ def post_cfg_function(args):
1283
+ nonlocal uncond_denoised
1284
+ uncond_denoised = args["uncond_denoised"]
1285
+ return args["denoised"]
1286
+
1287
+ if cfg_pp:
1288
+ model_options = extra_args.get("model_options", {}).copy()
1289
+ extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)
1290
+
1291
+ for i in trange(len(sigmas) - 1, disable=disable):
1292
+ if s_churn > 0:
1293
+ gamma = min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
1294
+ sigma_hat = sigmas[i] * (gamma + 1)
1295
+ else:
1296
+ gamma = 0
1297
+ sigma_hat = sigmas[i]
1298
+
1299
+ if gamma > 0:
1300
+ eps = torch.randn_like(x) * s_noise
1301
+ x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
1302
+ denoised = model(x, sigma_hat * s_in, **extra_args)
1303
+ if callback is not None:
1304
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
1305
+ if sigmas[i + 1] == 0 or old_denoised is None:
1306
+ # Euler method
1307
+ if cfg_pp:
1308
+ d = to_d(x, sigma_hat, uncond_denoised)
1309
+ x = denoised + d * sigmas[i + 1]
1310
+ else:
1311
+ d = to_d(x, sigma_hat, denoised)
1312
+ dt = sigmas[i + 1] - sigma_hat
1313
+ x = x + d * dt
1314
+ else:
1315
+ # Second order multistep method in https://arxiv.org/pdf/2308.02157
1316
+ t, t_next, t_prev = t_fn(sigmas[i]), t_fn(sigmas[i + 1]), t_fn(sigmas[i - 1])
1317
+ h = t_next - t
1318
+ c2 = (t_prev - t) / h
1319
+
1320
+ phi1_val, phi2_val = phi1_fn(-h), phi2_fn(-h)
1321
+ b1 = torch.nan_to_num(phi1_val - 1.0 / c2 * phi2_val, nan=0.0)
1322
+ b2 = torch.nan_to_num(1.0 / c2 * phi2_val, nan=0.0)
1323
+
1324
+ if cfg_pp:
1325
+ x = x + (denoised - uncond_denoised)
1326
+
1327
+ x = (sigma_fn(t_next) / sigma_fn(t)) * x + h * (b1 * denoised + b2 * old_denoised)
1328
+
1329
+ old_denoised = denoised
1330
+ return x
1331
+
1332
+ @torch.no_grad()
1333
+ def sample_res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., noise_sampler=None):
1334
+ return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_churn=s_churn, s_tmin=s_tmin, s_tmax=s_tmax, s_noise=s_noise, noise_sampler=noise_sampler, cfg_pp=False)
1335
+
1336
+ @torch.no_grad()
1337
+ def sample_res_multistep_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., noise_sampler=None):
1338
+ return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_churn=s_churn, s_tmin=s_tmin, s_tmax=s_tmax, s_noise=s_noise, noise_sampler=noise_sampler, cfg_pp=True)
comfy/k_diffusion/utils.py ADDED
@@ -0,0 +1,313 @@
1
+ from contextlib import contextmanager
2
+ import hashlib
3
+ import math
4
+ from pathlib import Path
5
+ import shutil
6
+ import urllib.request
7
+ import warnings
8
+
9
+ from PIL import Image
10
+ import torch
11
+ from torch import nn, optim
12
+ from torch.utils import data
13
+
14
+
15
+ def hf_datasets_augs_helper(examples, transform, image_key, mode='RGB'):
16
+ """Apply passed in transforms for HuggingFace Datasets."""
17
+ images = [transform(image.convert(mode)) for image in examples[image_key]]
18
+ return {image_key: images}
19
+
20
+
21
+ def append_dims(x, target_dims):
22
+ """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
23
+ dims_to_append = target_dims - x.ndim
24
+ if dims_to_append < 0:
25
+ raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
26
+ expanded = x[(...,) + (None,) * dims_to_append]
27
+ # MPS will get inf values if it tries to index into the new axes, but detaching fixes this.
28
+ # https://github.com/pytorch/pytorch/issues/84364
29
+ return expanded.detach().clone() if expanded.device.type == 'mps' else expanded
30
+
31
+
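+ # Illustrative broadcast example: scaling an image batch by a per-sample sigma.
+ #
+ #     sigma = torch.rand(8)             # shape [8]
+ #     x = torch.randn(8, 3, 64, 64)
+ #     x * append_dims(sigma, x.ndim)    # sigma viewed as [8, 1, 1, 1]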
32
+ def n_params(module):
33
+ """Returns the number of trainable parameters in a module."""
34
+ return sum(p.numel() for p in module.parameters())
35
+
36
+
37
+ def download_file(path, url, digest=None):
38
+ """Downloads a file if it does not exist, optionally checking its SHA-256 hash."""
39
+ path = Path(path)
40
+ path.parent.mkdir(parents=True, exist_ok=True)
41
+ if not path.exists():
42
+ with urllib.request.urlopen(url) as response, open(path, 'wb') as f:
43
+ shutil.copyfileobj(response, f)
44
+ if digest is not None:
45
+ file_digest = hashlib.sha256(path.read_bytes()).hexdigest()
46
+ if digest != file_digest:
47
+ raise OSError(f'hash of {path} (url: {url}) failed to validate')
48
+ return path
49
+
50
+
51
+ @contextmanager
52
+ def train_mode(model, mode=True):
53
+ """A context manager that places a model into training mode and restores
54
+ the previous mode on exit."""
55
+ modes = [module.training for module in model.modules()]
56
+ try:
57
+ yield model.train(mode)
58
+ finally:
59
+ for i, module in enumerate(model.modules()):
60
+ module.training = modes[i]
61
+
62
+
63
+ def eval_mode(model):
64
+ """A context manager that places a model into evaluation mode and restores
65
+ the previous mode on exit."""
66
+ return train_mode(model, False)
67
+
68
+
69
+ @torch.no_grad()
70
+ def ema_update(model, averaged_model, decay):
71
+ """Incorporates updated model parameters into an exponential moving averaged
72
+ version of a model. It should be called after each optimizer step."""
73
+ model_params = dict(model.named_parameters())
74
+ averaged_params = dict(averaged_model.named_parameters())
75
+ assert model_params.keys() == averaged_params.keys()
76
+
77
+ for name, param in model_params.items():
78
+ averaged_params[name].mul_(decay).add_(param, alpha=1 - decay)
79
+
80
+ model_buffers = dict(model.named_buffers())
81
+ averaged_buffers = dict(averaged_model.named_buffers())
82
+ assert model_buffers.keys() == averaged_buffers.keys()
83
+
84
+ for name, buf in model_buffers.items():
85
+ averaged_buffers[name].copy_(buf)
86
+
87
+
88
+ class EMAWarmup:
89
+ """Implements an EMA warmup using an inverse decay schedule.
90
+ If inv_gamma=1 and power=1, implements a simple average. inv_gamma=1, power=2/3 are
91
+ good values for models you plan to train for a million or more steps (reaches decay
92
+ factor 0.999 at 31.6K steps, 0.9999 at 1M steps), inv_gamma=1, power=3/4 for models
93
+ you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at
94
+ 215.4k steps).
95
+ Args:
96
+ inv_gamma (float): Inverse multiplicative factor of EMA warmup. Default: 1.
97
+ power (float): Exponential factor of EMA warmup. Default: 1.
98
+ min_value (float): The minimum EMA decay rate. Default: 0.
99
+ max_value (float): The maximum EMA decay rate. Default: 1.
100
+ start_at (int): The epoch to start averaging at. Default: 0.
101
+ last_epoch (int): The index of last epoch. Default: 0.
102
+ """
103
+
104
+ def __init__(self, inv_gamma=1., power=1., min_value=0., max_value=1., start_at=0,
105
+ last_epoch=0):
106
+ self.inv_gamma = inv_gamma
107
+ self.power = power
108
+ self.min_value = min_value
109
+ self.max_value = max_value
110
+ self.start_at = start_at
111
+ self.last_epoch = last_epoch
112
+
113
+ def state_dict(self):
114
+ """Returns the state of the class as a :class:`dict`."""
115
+ return dict(self.__dict__.items())
116
+
117
+ def load_state_dict(self, state_dict):
118
+ """Loads the class's state.
119
+ Args:
120
+ state_dict (dict): scaler state. Should be an object returned
121
+ from a call to :meth:`state_dict`.
122
+ """
123
+ self.__dict__.update(state_dict)
124
+
125
+ def get_value(self):
126
+ """Gets the current EMA decay rate."""
127
+ epoch = max(0, self.last_epoch - self.start_at)
128
+ value = 1 - (1 + epoch / self.inv_gamma) ** -self.power
129
+ return 0. if epoch < 0 else min(self.max_value, max(self.min_value, value))
130
+
131
+ def step(self):
132
+ """Updates the step count."""
133
+ self.last_epoch += 1
134
+
135
+
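+ # A minimal training-loop sketch (illustrative; `model`, `model_ema`, `opt`, `loader`, and
+ # `compute_loss` are hypothetical):
+ #
+ #     ema_sched = EMAWarmup(power=2 / 3)
+ #     for batch in loader:
+ #         opt.zero_grad()
+ #         compute_loss(model, batch).backward()
+ #         opt.step()
+ #         ema_update(model, model_ema, ema_sched.get_value())
+ #         ema_sched.step()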
136
+ class InverseLR(optim.lr_scheduler._LRScheduler):
+     """Implements an inverse decay learning rate schedule with an optional exponential
+     warmup. When last_epoch=-1, sets initial lr as lr.
+ 
+     inv_gamma is the number of steps/epochs required for the learning rate to decay to
+     (1 / 2)**power of its original value.
+ 
+     Args:
+         optimizer (Optimizer): Wrapped optimizer.
+         inv_gamma (float): Inverse multiplicative factor of learning rate decay. Default: 1.
+         power (float): Exponential factor of learning rate decay. Default: 1.
+         warmup (float): Exponential warmup factor (0 <= warmup < 1, 0 to disable).
+             Default: 0.
+         min_lr (float): The minimum learning rate. Default: 0.
+         last_epoch (int): The index of the last epoch. Default: -1.
+         verbose (bool): If ``True``, prints a message to stdout for
+             each update. Default: ``False``.
+     """
+ 
+     def __init__(self, optimizer, inv_gamma=1., power=1., warmup=0., min_lr=0.,
+                  last_epoch=-1, verbose=False):
+         self.inv_gamma = inv_gamma
+         self.power = power
+         if not 0. <= warmup < 1:
+             raise ValueError('Invalid value for warmup')
+         self.warmup = warmup
+         self.min_lr = min_lr
+         super().__init__(optimizer, last_epoch, verbose)
+ 
+     def get_lr(self):
+         if not self._get_lr_called_within_step:
+             warnings.warn("To get the last learning rate computed by the scheduler, "
+                           "please use `get_last_lr()`.")
+ 
+         return self._get_closed_form_lr()
+ 
+     def _get_closed_form_lr(self):
+         warmup = 1 - self.warmup ** (self.last_epoch + 1)
+         lr_mult = (1 + self.last_epoch / self.inv_gamma) ** -self.power
+         return [warmup * max(self.min_lr, base_lr * lr_mult)
+                 for base_lr in self.base_lrs]
+ 
+ 
+ class ExponentialLR(optim.lr_scheduler._LRScheduler):
+     """Implements an exponential learning rate schedule with an optional exponential
+     warmup. When last_epoch=-1, sets initial lr as lr. Decays the learning rate
+     continuously by decay (default 0.5) every num_steps steps.
+ 
+     Args:
+         optimizer (Optimizer): Wrapped optimizer.
+         num_steps (float): The number of steps to decay the learning rate by decay in.
+         decay (float): The factor by which to decay the learning rate every num_steps
+             steps. Default: 0.5.
+         warmup (float): Exponential warmup factor (0 <= warmup < 1, 0 to disable).
+             Default: 0.
+         min_lr (float): The minimum learning rate. Default: 0.
+         last_epoch (int): The index of the last epoch. Default: -1.
+         verbose (bool): If ``True``, prints a message to stdout for
+             each update. Default: ``False``.
+     """
+ 
+     def __init__(self, optimizer, num_steps, decay=0.5, warmup=0., min_lr=0.,
+                  last_epoch=-1, verbose=False):
+         self.num_steps = num_steps
+         self.decay = decay
+         if not 0. <= warmup < 1:
+             raise ValueError('Invalid value for warmup')
+         self.warmup = warmup
+         self.min_lr = min_lr
+         super().__init__(optimizer, last_epoch, verbose)
+ 
+     def get_lr(self):
+         if not self._get_lr_called_within_step:
+             warnings.warn("To get the last learning rate computed by the scheduler, "
+                           "please use `get_last_lr()`.")
+ 
+         return self._get_closed_form_lr()
+ 
+     def _get_closed_form_lr(self):
+         warmup = 1 - self.warmup ** (self.last_epoch + 1)
+         lr_mult = (self.decay ** (1 / self.num_steps)) ** self.last_epoch
+         return [warmup * max(self.min_lr, base_lr * lr_mult)
+                 for base_lr in self.base_lrs]
+ 
+ 
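Both schedulers follow the standard `torch.optim.lr_scheduler` protocol; a minimal sketch with an arbitrary optimizer (with `power=1`, `inv_gamma=20000` means the learning rate reaches half its initial value after 20000 steps):

```python
import torch

params = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD(params, lr=1e-2)
sched = InverseLR(opt, inv_gamma=20000, power=1., warmup=0.99)

for step in range(1000):
    opt.step()
    sched.step()  # warmup factor 1 - 0.99**(step + 1) ramps the lr up first
print(sched.get_last_lr())
```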
+ def rand_log_normal(shape, loc=0., scale=1., device='cpu', dtype=torch.float32):
+     """Draws samples from a lognormal distribution."""
+     return (torch.randn(shape, device=device, dtype=dtype) * scale + loc).exp()
+ 
+ 
+ def rand_log_logistic(shape, loc=0., scale=1., min_value=0., max_value=float('inf'), device='cpu', dtype=torch.float32):
+     """Draws samples from an optionally truncated log-logistic distribution."""
+     min_value = torch.as_tensor(min_value, device=device, dtype=torch.float64)
+     max_value = torch.as_tensor(max_value, device=device, dtype=torch.float64)
+     min_cdf = min_value.log().sub(loc).div(scale).sigmoid()
+     max_cdf = max_value.log().sub(loc).div(scale).sigmoid()
+     u = torch.rand(shape, device=device, dtype=torch.float64) * (max_cdf - min_cdf) + min_cdf
+     return u.logit().mul(scale).add(loc).exp().to(dtype)
+ 
+ 
+ def rand_log_uniform(shape, min_value, max_value, device='cpu', dtype=torch.float32):
+     """Draws samples from a log-uniform distribution."""
+     min_value = math.log(min_value)
+     max_value = math.log(max_value)
+     return (torch.rand(shape, device=device, dtype=dtype) * (max_value - min_value) + min_value).exp()
+ 
+ 
+ def rand_v_diffusion(shape, sigma_data=1., min_value=0., max_value=float('inf'), device='cpu', dtype=torch.float32):
+     """Draws samples from a truncated v-diffusion training timestep distribution."""
+     min_cdf = math.atan(min_value / sigma_data) * 2 / math.pi
+     max_cdf = math.atan(max_value / sigma_data) * 2 / math.pi
+     u = torch.rand(shape, device=device, dtype=dtype) * (max_cdf - min_cdf) + min_cdf
+     return torch.tan(u * math.pi / 2) * sigma_data
+ 
+ 
+ def rand_split_log_normal(shape, loc, scale_1, scale_2, device='cpu', dtype=torch.float32):
+     """Draws samples from a split lognormal distribution."""
+     n = torch.randn(shape, device=device, dtype=dtype).abs()
+     u = torch.rand(shape, device=device, dtype=dtype)
+     n_left = n * -scale_1 + loc
+     n_right = n * scale_2 + loc
+     ratio = scale_1 / (scale_1 + scale_2)
+     return torch.where(u < ratio, n_left, n_right).exp()
+ 
+ 
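These `rand_*` helpers draw per-sample noise levels (sigmas) for diffusion training; a quick sanity sketch with arbitrary parameters:

```python
import torch

# One positive sigma per sample in a batch of 4.
sigmas = rand_log_normal([4], loc=-1.2, scale=1.2)
assert sigmas.shape == (4,) and bool((sigmas > 0).all())

# log(sigma) is approximately N(loc, scale**2); check the empirical mean.
print(rand_log_normal([100000], loc=-1.2, scale=1.2).log().mean())  # ~ -1.2

# The truncated samplers respect their bounds.
s = rand_log_logistic([100000], min_value=1e-2, max_value=80.)
assert bool(s.min() >= 1e-2) and bool(s.max() <= 80.)
```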
+ class FolderOfImages(data.Dataset):
+     """Recursively finds all images in a directory. It does not support
+     classes/targets."""
+ 
+     IMG_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp'}
+ 
+     def __init__(self, root, transform=None):
+         super().__init__()
+         self.root = Path(root)
+         self.transform = nn.Identity() if transform is None else transform
+         self.paths = sorted(path for path in self.root.rglob('*') if path.suffix.lower() in self.IMG_EXTENSIONS)
+ 
+     def __repr__(self):
+         return f'FolderOfImages(root="{self.root}", len: {len(self)})'
+ 
+     def __len__(self):
+         return len(self.paths)
+ 
+     def __getitem__(self, key):
+         path = self.paths[key]
+         with open(path, 'rb') as f:
+             image = Image.open(f).convert('RGB')
+         image = self.transform(image)
+         return image,
+ 
+ 
+ class CSVLogger:
+     def __init__(self, filename, columns):
+         self.filename = Path(filename)
+         self.columns = columns
+         if self.filename.exists():
+             self.file = open(self.filename, 'a')
+         else:
+             self.file = open(self.filename, 'w')
+             self.write(*self.columns)
+ 
+     def write(self, *args):
+         print(*args, sep=',', file=self.file, flush=True)
+ 
+ 
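A short usage sketch for the two helpers above; the image directory path is hypothetical, and `torchvision` is only one possible source of a transform:

```python
from torchvision import transforms

# FolderOfImages yields 1-tuples of transformed images, no labels.
dataset = FolderOfImages('/path/to/images', transform=transforms.ToTensor())
print(len(dataset), dataset[0][0].shape)

# CSVLogger writes the header only when it creates the file; on an
# existing file it appends. Note that write() does no quoting, so values
# should not themselves contain commas.
logger = CSVLogger('metrics.csv', ['step', 'loss'])
logger.write(100, 0.25)
```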
+ @contextmanager
+ def tf32_mode(cudnn=None, matmul=None):
+     """A context manager that sets whether TF32 is allowed on cuDNN or matmul."""
+     cudnn_old = torch.backends.cudnn.allow_tf32
+     matmul_old = torch.backends.cuda.matmul.allow_tf32
+     try:
+         if cudnn is not None:
+             torch.backends.cudnn.allow_tf32 = cudnn
+         if matmul is not None:
+             torch.backends.cuda.matmul.allow_tf32 = matmul
+         yield
+     finally:
+         if cudnn is not None:
+             torch.backends.cudnn.allow_tf32 = cudnn_old
+         if matmul is not None:
+             torch.backends.cuda.matmul.allow_tf32 = matmul_old
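Usage of `tf32_mode` is straightforward; a sketch that disables TF32 for a numerically sensitive region and relies on the `finally` block to restore the previous flags:

```python
import torch

with tf32_mode(cudnn=False, matmul=False):
    # Matmuls in here run without TF32, regardless of the global setting.
    a = torch.randn(64, 64)
    b = a @ a
# On exit the original allow_tf32 flags are restored, even on exceptions.
```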
comfy/latent_formats.py ADDED
@@ -0,0 +1,409 @@
+ import torch
+ 
+ class LatentFormat:
+     scale_factor = 1.0
+     latent_channels = 4
+     latent_dimensions = 2
+     latent_rgb_factors = None
+     latent_rgb_factors_bias = None
+     taesd_decoder_name = None
+ 
+     def process_in(self, latent):
+         return latent * self.scale_factor
+ 
+     def process_out(self, latent):
+         return latent / self.scale_factor
+ 
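`process_in` maps a decoded VAE latent into the scale the diffusion model was trained on, and `process_out` inverts it; for every format the pair should round-trip. A quick sanity sketch:

```python
import torch

fmt = LatentFormat()  # base class: plain scaling by scale_factor
x = torch.randn(1, fmt.latent_channels, 64, 64)
assert torch.allclose(fmt.process_out(fmt.process_in(x)), x)
```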
+ class SD15(LatentFormat):
+     def __init__(self, scale_factor=0.18215):
+         self.scale_factor = scale_factor
+         self.latent_rgb_factors = [
+             #    R        G        B
+             [ 0.3512,  0.2297,  0.3227],
+             [ 0.3250,  0.4974,  0.2350],
+             [-0.2829,  0.1762,  0.2721],
+             [-0.2120, -0.2616, -0.7177]
+         ]
+         self.taesd_decoder_name = "taesd_decoder"
+ 
+ class SDXL(LatentFormat):
+     scale_factor = 0.13025
+ 
+     def __init__(self):
+         self.latent_rgb_factors = [
+             #    R        G        B
+             [ 0.3651,  0.4232,  0.4341],
+             [-0.2533, -0.0042,  0.1068],
+             [ 0.1076,  0.1111, -0.0362],
+             [-0.3165, -0.2492, -0.2188]
+         ]
+         self.latent_rgb_factors_bias = [ 0.1084, -0.0175, -0.0011]
+ 
+         self.taesd_decoder_name = "taesdxl_decoder"
+ 
+ class SDXL_Playground_2_5(LatentFormat):
+     def __init__(self):
+         self.scale_factor = 0.5
+         self.latents_mean = torch.tensor([-1.6574, 1.886, -1.383, 2.5155]).view(1, 4, 1, 1)
+         self.latents_std = torch.tensor([8.4927, 5.9022, 6.5498, 5.2299]).view(1, 4, 1, 1)
+ 
+         self.latent_rgb_factors = [
+             #    R        G        B
+             [ 0.3920,  0.4054,  0.4549],
+             [-0.2634, -0.0196,  0.0653],
+             [ 0.0568,  0.1687, -0.0755],
+             [-0.3112, -0.2359, -0.2076]
+         ]
+         self.taesd_decoder_name = "taesdxl_decoder"
+ 
+     def process_in(self, latent):
+         latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+         latents_std = self.latents_std.to(latent.device, latent.dtype)
+         return (latent - latents_mean) * self.scale_factor / latents_std
+ 
+     def process_out(self, latent):
+         latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+         latents_std = self.latents_std.to(latent.device, latent.dtype)
+         return latent * latents_std / self.scale_factor + latents_mean
+ 
+ 
+ class SD_X4(LatentFormat):
+     def __init__(self):
+         self.scale_factor = 0.08333
+         self.latent_rgb_factors = [
+             [-0.2340, -0.3863, -0.3257],
+             [ 0.0994,  0.0885, -0.0908],
+             [-0.2833, -0.2349, -0.3741],
+             [ 0.2523, -0.0055, -0.1651]
+         ]
+ 
+ class SC_Prior(LatentFormat):
+     latent_channels = 16
+     def __init__(self):
+         self.scale_factor = 1.0
+         self.latent_rgb_factors = [
+             [-0.0326, -0.0204, -0.0127],
+             [-0.1592, -0.0427,  0.0216],
+             [ 0.0873,  0.0638, -0.0020],
+             [-0.0602,  0.0442,  0.1304],
+             [ 0.0800, -0.0313, -0.1796],
+             [-0.0810, -0.0638, -0.1581],
+             [ 0.1791,  0.1180,  0.0967],
+             [ 0.0740,  0.1416,  0.0432],
+             [-0.1745, -0.1888, -0.1373],
+             [ 0.2412,  0.1577,  0.0928],
+             [ 0.1908,  0.0998,  0.0682],
+             [ 0.0209,  0.0365, -0.0092],
+             [ 0.0448, -0.0650, -0.1728],
+             [-0.1658, -0.1045, -0.1308],
+             [ 0.0542,  0.1545,  0.1325],
+             [-0.0352, -0.1672, -0.2541]
+         ]
+ 
+ class SC_B(LatentFormat):
+     def __init__(self):
+         self.scale_factor = 1.0 / 0.43
+         self.latent_rgb_factors = [
+             [ 0.1121,  0.2006,  0.1023],
+             [-0.2093, -0.0222, -0.0195],
+             [-0.3087, -0.1535,  0.0366],
+             [ 0.0290, -0.1574, -0.4078]
+         ]
+ 
+ class SD3(LatentFormat):
+     latent_channels = 16
+     def __init__(self):
+         self.scale_factor = 1.5305
+         self.shift_factor = 0.0609
+         self.latent_rgb_factors = [
+             [-0.0922, -0.0175,  0.0749],
+             [ 0.0311,  0.0633,  0.0954],
+             [ 0.1994,  0.0927,  0.0458],
+             [ 0.0856,  0.0339,  0.0902],
+             [ 0.0587,  0.0272, -0.0496],
+             [-0.0006,  0.1104,  0.0309],
+             [ 0.0978,  0.0306,  0.0427],
+             [-0.0042,  0.1038,  0.1358],
+             [-0.0194,  0.0020,  0.0669],
+             [-0.0488,  0.0130, -0.0268],
+             [ 0.0922,  0.0988,  0.0951],
+             [-0.0278,  0.0524, -0.0542],
+             [ 0.0332,  0.0456,  0.0895],
+             [-0.0069, -0.0030, -0.0810],
+             [-0.0596, -0.0465, -0.0293],
+             [-0.1448, -0.1463, -0.1189]
+         ]
+         self.latent_rgb_factors_bias = [0.2394, 0.2135, 0.1925]
+         self.taesd_decoder_name = "taesd3_decoder"
+ 
+     def process_in(self, latent):
+         return (latent - self.shift_factor) * self.scale_factor
+ 
+     def process_out(self, latent):
+         return (latent / self.scale_factor) + self.shift_factor
+ 
+ class StableAudio1(LatentFormat):
+     latent_channels = 64
+     latent_dimensions = 1
+ 
+ class Flux(SD3):
+     latent_channels = 16
+     def __init__(self):
+         self.scale_factor = 0.3611
+         self.shift_factor = 0.1159
+         self.latent_rgb_factors = [
+             [-0.0346,  0.0244,  0.0681],
+             [ 0.0034,  0.0210,  0.0687],
+             [ 0.0275, -0.0668, -0.0433],
+             [-0.0174,  0.0160,  0.0617],
+             [ 0.0859,  0.0721,  0.0329],
+             [ 0.0004,  0.0383,  0.0115],
+             [ 0.0405,  0.0861,  0.0915],
+             [-0.0236, -0.0185, -0.0259],
+             [-0.0245,  0.0250,  0.1180],
+             [ 0.1008,  0.0755, -0.0421],
+             [-0.0515,  0.0201,  0.0011],
+             [ 0.0428, -0.0012, -0.0036],
+             [ 0.0817,  0.0765,  0.0749],
+             [-0.1264, -0.0522, -0.1103],
+             [-0.0280, -0.0881, -0.0499],
+             [-0.1262, -0.0982, -0.0778]
+         ]
+         self.latent_rgb_factors_bias = [-0.0329, -0.0718, -0.0851]
+         self.taesd_decoder_name = "taef1_decoder"
+ 
+     def process_in(self, latent):
+         return (latent - self.shift_factor) * self.scale_factor
+ 
+     def process_out(self, latent):
+         return (latent / self.scale_factor) + self.shift_factor
+ 
+ class Mochi(LatentFormat):
+     latent_channels = 12
+     latent_dimensions = 3
+ 
+     def __init__(self):
+         self.scale_factor = 1.0
+         self.latents_mean = torch.tensor([-0.06730895953510081, -0.038011381506090416, -0.07477820912866141,
+                                           -0.05565264470995561, 0.012767231469026969, -0.04703542746246419,
+                                           0.043896967884726704, -0.09346305707025976, -0.09918314763016893,
+                                           -0.008729793427399178, -0.011931556316503654, -0.0321993391887285]).view(1, self.latent_channels, 1, 1, 1)
+         self.latents_std = torch.tensor([0.9263795028493863, 0.9248894543193766, 0.9393059390890617,
+                                          0.959253732819592, 0.8244560132752793, 0.917259975397747,
+                                          0.9294154431013696, 1.3720942357788521, 0.881393668867029,
+                                          0.9168315692124348, 0.9185249279345552, 0.9274757570805041]).view(1, self.latent_channels, 1, 1, 1)
+ 
+         self.latent_rgb_factors = [
+             [-0.0069, -0.0045,  0.0018],
+             [ 0.0154, -0.0692, -0.0274],
+             [ 0.0333,  0.0019,  0.0206],
+             [-0.1390,  0.0628,  0.1678],
+             [-0.0725,  0.0134, -0.1898],
+             [ 0.0074, -0.0270, -0.0209],
+             [-0.0176, -0.0277, -0.0221],
+             [ 0.5294,  0.5204,  0.3852],
+             [-0.0326, -0.0446, -0.0143],
+             [-0.0659,  0.0153, -0.0153],
+             [ 0.0185, -0.0217,  0.0014],
+             [-0.0396, -0.0495, -0.0281]
+         ]
+         self.latent_rgb_factors_bias = [-0.0940, -0.1418, -0.1453]
+         self.taesd_decoder_name = None  # TODO
+ 
+     def process_in(self, latent):
+         latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+         latents_std = self.latents_std.to(latent.device, latent.dtype)
+         return (latent - latents_mean) * self.scale_factor / latents_std
+ 
+     def process_out(self, latent):
+         latents_mean = self.latents_mean.to(latent.device, latent.dtype)
+         latents_std = self.latents_std.to(latent.device, latent.dtype)
+         return latent * latents_std / self.scale_factor + latents_mean
+ 
+ class LTXV(LatentFormat):
+     latent_channels = 128
+     latent_dimensions = 3
+ 
+     def __init__(self):
+         self.latent_rgb_factors = [
+             [ 1.1202e-02, -6.3815e-04, -1.0021e-02],
+             [ 8.6031e-02,  6.5813e-02,  9.5409e-04],
+             [-1.2576e-02, -7.5734e-03, -4.0528e-03],
+             [ 9.4063e-03, -2.1688e-03,  2.6093e-03],
+             [ 3.7636e-03,  1.2765e-02,  9.1548e-03],
+             [ 2.1024e-02, -5.2973e-03,  3.4373e-03],
+             [-8.8896e-03, -1.9703e-02, -1.8761e-02],
+             [-1.3160e-02, -1.0523e-02,  1.9709e-03],
+             [-1.5152e-03, -6.9891e-03, -7.5810e-03],
+             [-1.7247e-03,  4.6560e-04, -3.3839e-03],
+             [ 1.3617e-02,  4.7077e-03, -2.0045e-03],
+             [ 1.0256e-02,  7.7318e-03,  1.3948e-02],
+             [-1.6108e-02, -6.2151e-03,  1.1561e-03],
+             [ 7.3407e-03,  1.5628e-02,  4.4865e-04],
+             [ 9.5357e-04, -2.9518e-03, -1.4760e-02],
+             [ 1.9143e-02,  1.0868e-02,  1.2264e-02],
+             [ 4.4575e-03,  3.6682e-05, -6.8508e-03],
+             [-4.5681e-04,  3.2570e-03,  7.7929e-03],
+             [ 3.3902e-02,  3.3405e-02,  3.7454e-02],
+             [-2.3001e-02, -2.4877e-03, -3.1033e-03],
+             [ 5.0265e-02,  3.8841e-02,  3.3539e-02],
+             [-4.1018e-03, -1.1095e-03,  1.5859e-03],
+             [-1.2689e-01, -1.3107e-01, -2.1005e-01],
+             [ 2.6276e-02,  1.4189e-02, -3.5963e-03],
+             [-4.8679e-03,  8.8486e-03,  7.8029e-03],
+             [-1.6610e-03, -4.8597e-03, -5.2060e-03],
+             [-2.1010e-03,  2.3610e-03,  9.3796e-03],
+             [-2.2482e-02, -2.1305e-02, -1.5087e-02],
+             [-1.5753e-02, -1.0646e-02, -6.5083e-03],
+             [-4.6975e-03,  5.0288e-03, -6.7390e-03],
+             [ 1.1951e-02,  2.0712e-02,  1.6191e-02],
+             [-6.3704e-03, -8.4827e-03, -9.5483e-03],
+             [ 7.2610e-03, -9.9326e-03, -2.2978e-02],
+             [-9.1904e-04,  6.2882e-03,  9.5720e-03],
+             [-3.7178e-02, -3.7123e-02, -5.6713e-02],
+             [-1.3373e-01, -1.0720e-01, -5.3801e-02],
+             [-5.3702e-03,  8.1256e-03,  8.8397e-03],
+             [-1.5247e-01, -2.1437e-01, -2.1843e-01],
+             [ 3.1441e-02,  7.0335e-03, -9.7541e-03],
+             [ 2.1528e-03, -8.9817e-03, -2.1023e-02],
+             [ 3.8461e-03, -5.8957e-03, -1.5014e-02],
+             [-4.3470e-03, -1.2940e-02, -1.5972e-02],
+             [-5.4781e-03, -1.0842e-02, -3.0204e-03],
+             [-6.5347e-03,  3.0806e-03, -1.0163e-02],
+             [-5.0414e-03, -7.1503e-03, -8.9686e-04],
+             [-8.5851e-03, -2.4351e-03,  1.0674e-03],
+             [-9.0016e-03, -9.6493e-03,  1.5692e-03],
+             [ 5.0914e-03,  1.2099e-02,  1.9968e-02],
+             [ 1.3758e-02,  1.1669e-02,  8.1958e-03],
+             [-1.0518e-02, -1.1575e-02, -4.1307e-03],
+             [-2.8410e-02, -3.1266e-02, -2.2149e-02],
+             [ 2.9336e-03,  3.6511e-02,  1.8717e-02],
+             [-1.6703e-02, -1.6696e-02, -4.4529e-03],
+             [ 4.8818e-02,  4.0063e-02,  8.7410e-03],
+             [-1.5066e-02, -5.7328e-04,  2.9785e-03],
+             [-1.7613e-02, -8.1034e-03,  1.3086e-02],
+             [-9.2633e-03,  1.0803e-02, -6.3489e-03],
+             [ 3.0851e-03,  4.7750e-04,  1.2347e-02],
+             [-2.2785e-02, -2.3043e-02, -2.6005e-02],
+             [-2.4787e-02, -1.5389e-02, -2.2104e-02],
+             [-2.3572e-02,  1.0544e-03,  1.2361e-02],
+             [-7.8915e-03, -1.2271e-03, -6.0968e-03],
+             [-1.1478e-02, -1.2543e-03,  6.2679e-03],
+             [-5.4229e-02,  2.6644e-02,  6.3394e-03],
+             [ 4.4216e-03, -7.3338e-03, -1.0464e-02],
+             [-4.5013e-03,  1.6082e-03,  1.4420e-02],
+             [ 1.3673e-02,  8.8877e-03,  4.1253e-03],
+             [-1.0145e-02,  9.0072e-03,  1.5695e-02],
+             [-5.6234e-03,  1.1847e-03,  8.1261e-03],
+             [-3.7171e-03, -5.3538e-03,  1.2590e-03],
+             [ 2.9476e-02,  2.1424e-02,  3.0424e-02],
+             [-3.4925e-02, -2.4340e-02, -2.5316e-02],
+             [-3.4127e-02, -2.2406e-02, -1.0589e-02],
+             [-1.7342e-02, -1.3249e-02, -1.0719e-02],
+             [-2.1478e-03, -8.6051e-03, -2.9878e-03],
+             [ 1.2089e-03, -4.2391e-03, -6.8569e-03],
+             [ 9.0411e-04, -6.6886e-03, -6.7547e-05],
+             [ 1.6048e-02, -1.0057e-02, -2.8929e-02],
+             [ 1.2290e-03,  1.0163e-02,  1.8861e-02],
+             [ 1.7264e-02,  2.7257e-04,  1.3785e-02],
+             [-1.3482e-02, -3.6427e-03,  6.7481e-04],
+             [ 4.6782e-03, -5.2423e-03,  2.4467e-03],
+             [-5.9113e-03, -6.2244e-03, -1.8162e-03],
+             [ 1.5496e-02,  1.4582e-02,  1.9514e-03],
+             [ 7.4958e-03,  1.5886e-03, -8.2305e-03],
+             [ 1.9086e-02,  1.6360e-03, -3.9674e-03],
+             [-5.7021e-03, -2.7307e-03, -4.1066e-03],
+             [ 1.7450e-03,  1.4602e-02,  2.5794e-02],
+             [-8.2788e-04,  2.2902e-03,  4.5161e-03],
+             [ 1.1632e-02,  8.9193e-03, -7.2813e-03],
+             [ 7.5721e-03,  2.6784e-03,  1.1393e-02],
+             [ 5.1939e-03,  3.6903e-03,  1.4049e-02],
+             [-1.8383e-02, -2.2529e-02, -2.4477e-02],
+             [ 5.8842e-04, -5.7874e-03, -1.4770e-02],
+             [-1.6125e-02, -8.6101e-03, -1.4533e-02],
+             [ 2.0540e-02,  2.0729e-02,  6.4338e-03],
+             [ 3.3587e-03, -1.1226e-02, -1.6444e-02],
+             [-1.4742e-03, -1.0489e-02,  1.7097e-03],
+             [ 2.8130e-02,  2.3546e-02,  3.2791e-02],
+             [-1.8532e-02, -1.2842e-02, -8.7756e-03],
+             [-8.0533e-03, -1.0771e-02, -1.7536e-02],
+             [-3.9009e-03,  1.6150e-02,  3.3359e-02],
+             [-7.4554e-03, -1.4154e-02, -6.1910e-03],
+             [ 3.4734e-03, -1.1370e-02, -1.0581e-02],
+             [ 1.1476e-02,  3.9281e-03,  2.8231e-03],
+             [ 7.1639e-03, -1.4741e-03, -3.8066e-03],
+             [ 2.2250e-03, -8.7552e-03, -9.5719e-03],
+             [ 2.4146e-02,  2.1696e-02,  2.8056e-02],
+             [-5.4365e-03, -2.4291e-02, -1.7802e-02],
+             [ 7.4263e-03,  1.0510e-02,  1.2705e-02],
+             [ 6.2669e-03,  6.2658e-03,  1.9211e-02],
+             [ 1.6378e-02,  9.4933e-03,  6.6971e-03],
+             [ 1.7173e-02,  2.3601e-02,  2.3296e-02],
+             [-1.4568e-02, -9.8279e-03, -1.1556e-02],
+             [ 1.4431e-02,  1.4430e-02,  6.6362e-03],
+             [-6.8230e-03,  1.8863e-02,  1.4555e-02],
+             [ 6.1156e-03,  3.4700e-03, -2.6662e-03],
+             [-2.6983e-03, -5.9402e-03, -9.2276e-03],
+             [ 1.0235e-02,  7.4173e-03, -7.6243e-03],
+             [-1.3255e-02,  1.9322e-02, -9.2153e-04],
+             [ 2.4222e-03, -4.8039e-03, -1.5759e-02],
+             [ 2.6244e-02,  2.5951e-02,  2.0249e-02],
+             [ 1.5711e-02,  1.8498e-02,  2.7407e-03],
+             [-2.1714e-03,  4.7214e-03, -2.2443e-02],
+             [-7.4747e-03,  7.4166e-03,  1.4430e-02],
+             [-8.3906e-03, -7.9776e-03,  9.7927e-03],
+             [ 3.8321e-02,  9.6622e-03, -1.9268e-02],
+             [-1.4605e-02, -6.7032e-03,  3.9675e-03]
+         ]
+ 
+         self.latent_rgb_factors_bias = [-0.0571, -0.1657, -0.2512]
+ 
+ class HunyuanVideo(LatentFormat):
+     latent_channels = 16
+     latent_dimensions = 3
+     scale_factor = 0.476986
+     latent_rgb_factors = [
+         [-0.0395, -0.0331,  0.0445],
+         [ 0.0696,  0.0795,  0.0518],
+         [ 0.0135, -0.0945, -0.0282],
+         [ 0.0108, -0.0250, -0.0765],
+         [-0.0209,  0.0032,  0.0224],
+         [-0.0804, -0.0254, -0.0639],
+         [-0.0991,  0.0271, -0.0669],
+         [-0.0646, -0.0422, -0.0400],
+         [-0.0696, -0.0595, -0.0894],
+         [-0.0799, -0.0208, -0.0375],
+         [ 0.1166,  0.1627,  0.0962],
+         [ 0.1165,  0.0432,  0.0407],
+         [-0.2315, -0.1920, -0.1355],
+         [-0.0270,  0.0401, -0.0821],
+         [-0.0616, -0.0997, -0.0727],
+         [ 0.0249, -0.0469, -0.1703]
+     ]
+ 
+     latent_rgb_factors_bias = [ 0.0259, -0.0192, -0.0761]
+ 
+ class Cosmos1CV8x8x8(LatentFormat):
+     latent_channels = 16
+     latent_dimensions = 3
+ 
+     latent_rgb_factors = [
+         [ 0.1817,  0.2284,  0.2423],
+         [-0.0586, -0.0862, -0.3108],
+         [-0.4703, -0.4255, -0.3995],
+         [ 0.0803,  0.1963,  0.1001],
+         [-0.0820, -0.1050,  0.0400],
+         [ 0.2511,  0.3098,  0.2787],
+         [-0.1830, -0.2117, -0.0040],
+         [-0.0621, -0.2187, -0.0939],
+         [ 0.3619,  0.1082,  0.1455],
+         [ 0.3164,  0.3922,  0.2575],
+         [ 0.1152,  0.0231, -0.0462],
+         [-0.1434, -0.3609, -0.3665],
+         [ 0.0635,  0.1471,  0.1680],
+         [-0.3635, -0.1963, -0.3248],
+         [-0.1865,  0.0365,  0.2346],
+         [ 0.0447,  0.0994,  0.0881]
+     ]
+ 
+     latent_rgb_factors_bias = [-0.1223, -0.1889, -0.1976]
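`latent_rgb_factors` is a `latent_channels x 3` projection used for cheap RGB previews of latents. A sketch of how such a preview can be computed; the einsum-based helper here is illustrative, not ComfyUI's actual preview code:

```python
import torch

def latent_to_rgb_preview(fmt, latent):
    # latent: [batch, channels, height, width]; project each latent pixel's
    # channel vector through the per-format factors, then add the bias.
    weights = torch.tensor(fmt.latent_rgb_factors)            # [C, 3]
    rgb = torch.einsum('bchw,cr->brhw', latent, weights)
    if fmt.latent_rgb_factors_bias is not None:
        rgb += torch.tensor(fmt.latent_rgb_factors_bias).view(1, 3, 1, 1)
    return rgb.clamp(-1, 1)  # rough preview in [-1, 1]

preview = latent_to_rgb_preview(SD15(), torch.randn(1, 4, 64, 64))
print(preview.shape)  # torch.Size([1, 3, 64, 64])
```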