diff --git a/.gradio/certificate.pem b/.gradio/certificate.pem new file mode 100644 index 0000000000000000000000000000000000000000..b85c8037f6b60976b2546fdbae88312c5246d9a3 --- /dev/null +++ b/.gradio/certificate.pem @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc +oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/app.py b/app.py index 0aed6bee44c0185e68afe4c10621981adeb77d47..88d452d5b541f0163294b2ec3f651c1f035b0a9a 100644 --- a/app.py +++ b/app.py @@ -1,15 +1,32 @@ import os import sys + +sys.path.insert(0, os.path.dirname(__file__)) + from typing import Any, Mapping, Sequence, Union import gradio as gr +import spaces import torch from huggingface_hub import hf_hub_download + from nodes import NODE_CLASS_MAPPINGS -from comfy import model_management -# import spaces -# @spaces.GPU(duration=60) #modify the duration for the average it takes for your worflow to run, in seconds +hf_hub_download( + repo_id="uwg/upscaler", + filename="ESRGAN/4x_NMKD-Siax_200k.pth", + local_dir="models/upscale_models", +) +hf_hub_download( + repo_id="ezioruan/inswapper_128.onnx", + filename="inswapper_128.onnx", + local_dir="models/insightface", +) +hf_hub_download( + repo_id="ziixzz/codeformer-v0.1.0.pth", + filename="codeformer-v0.1.0.pth", + local_dir="models/facerestore_models", +) def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any: @@ -76,11 +93,8 @@ def add_extra_model_paths() -> None: """ Parse the optional extra_model_paths.yaml file and add the parsed paths to the 
sys.path. """ - try: - from app import load_extra_path_config - except ImportError: - print("Could not import load_extra_path_config from main.py. Looking in utils.extra_config instead.") - from utils.extra_config import load_extra_path_config + from utils.extra_config import load_extra_path_config + extra_model_paths = find_path("extra_model_paths.yaml") if extra_model_paths is not None: @@ -100,9 +114,11 @@ def import_custom_nodes() -> None: creates a PromptQueue, and initializes the custom nodes. """ import asyncio + import execution - from nodes import init_extra_nodes import server + from nodes import init_extra_nodes + # Creating a new event loop and setting it as the default loop loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) @@ -115,6 +131,7 @@ def import_custom_nodes() -> None: init_extra_nodes() +@spaces.GPU(duration=360) def advance_blur(input_image): import_custom_nodes() with torch.inference_mode(): @@ -136,7 +153,7 @@ def advance_blur(input_image): upscalemodelloader = NODE_CLASS_MAPPINGS["UpscaleModelLoader"]() upscale_model = upscalemodelloader.load_model( - model_name="4x_NMKD-Siax_200k.pth" + model_name="ESRGAN/4x_NMKD-Siax_200k.pth" ) reactorbuildfacemodel = NODE_CLASS_MAPPINGS["ReActorBuildFaceModel"]() @@ -214,7 +231,7 @@ if __name__ == "__main__": with gr.Column(): input_image = gr.Image(label="Input Image", type="filepath") generate_btn = gr.Button("Generate") - + with gr.Column(): # The output image output_image = gr.Image(label="Generated Image") @@ -222,9 +239,6 @@ if __name__ == "__main__": # When clicking the button, it will trigger the `generate_image` function, with the respective inputs # and the output an image generate_btn.click( - fn=advance_blur, - inputs=[input_image], - outputs=[output_image] + fn=advance_blur, inputs=[input_image], outputs=[output_image] ) app.launch(share=True) - diff --git a/custom_nodes/ComfyUI-KJNodes-main/.gitignore b/custom_nodes/ComfyUI-KJNodes-main/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d462a62fdd67858a783934170db4091ea95eb18f --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/.gitignore @@ -0,0 +1,11 @@ +__pycache__ +/venv +*.code-workspace +.history +.vscode +*.ckpt +*.pth +types +models +jsconfig.json +custom_dimensions.json diff --git a/custom_nodes/ComfyUI-KJNodes-main/LICENSE b/custom_nodes/ComfyUI-KJNodes-main/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/custom_nodes/ComfyUI-KJNodes-main/README.md b/custom_nodes/ComfyUI-KJNodes-main/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6371f5014823bc66ffd1f378fea8715a7dd590ff --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/README.md @@ -0,0 +1,65 @@ +# KJNodes for ComfyUI + +Various quality of life and masking related -nodes and scripts made by combining functionality of existing nodes for ComfyUI. + +I know I'm bad at documentation, especially this project that has grown from random practice nodes to... too many lines in one file. +I have however started to add descriptions to the nodes themselves, there's a small ? 
you can click for info on what the node does. +This is still work in progress, like everything else. + +# Installation +1. Clone this repo into the `custom_nodes` folder. +2. Install dependencies: `pip install -r requirements.txt` + or, if you use the portable install, run this in the ComfyUI_windows_portable folder: + + `python_embeded\python.exe -m pip install -r ComfyUI\custom_nodes\ComfyUI-KJNodes\requirements.txt` + + +## Javascript + +### browserstatus.js +Sets the favicon to a green circle when not processing anything, sets it to red when processing, and shows the progress percentage and the length of your queue. +Off by default; needs to be enabled from the options, and overrides the Custom-Scripts favicon when enabled. + +## Nodes: + +### Set/Get + +JavaScript nodes to set and get constants to reduce unnecessary lines. Takes in and returns anything; purely visual nodes. +On the right-click menu of these nodes there are now options to visualize the paths, as well as an option to jump to the corresponding node on the other end. + +**Known limitations**: + - Will not work with any node that dynamically sets its outputs, such as a reroute or another Set/Get node + - Will not work when directly connected to a bypassed node + - Other possible conflicts with JavaScript-based nodes. + +### ColorToMask + +RGB color value to mask, works with batches and AnimateDiff. + +### ConditioningMultiCombine + +Combine any number of conditions, saves space. + +### ConditioningSetMaskAndCombine + +Mask and combine two sets of conditions, saves space. + +### GrowMaskWithBlur + +Grows or shrinks (with negative values) a mask, with an option to invert the input; returns the mask and the inverted mask. Additionally blurs the mask; this is a slow operation, especially with big batches. + +### RoundMask + +![image](https://github.com/kijai/ComfyUI-KJNodes/assets/40791699/52c85202-f74e-4b96-9dac-c8bda5ddcc40) + +### WidgetToString +Outputs the value of a widget on any node as a string. +![example of use](docs/images/2024-04-03_20_49_29-ComfyUI.png) + +Enable node ID display from the Manager menu to get the ID of the node you want to read a widget from: +![enable node id display](docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png) + +Use the node ID of the target node, and add the name of the widget to read from: +![use node id and widget name](docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png) + +Recreating or reloading the target node will change its ID, and the WidgetToString node will no longer be able to find it until you update the node ID value with the new one. 
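The `__init__.py` added in the next diff registers every node listed above through a single `NODE_CONFIG` dictionary and a `generate_node_mappings()` helper. As orientation, the sketch below shows the standard ComfyUI custom-node convention that registration builds on. It is a minimal, hypothetical example: `ExampleNode`, its category, and its widget are made up for illustration; only the `INPUT_TYPES`/`RETURN_TYPES`/`FUNCTION`/`CATEGORY` attributes and the module-level `NODE_CLASS_MAPPINGS`/`NODE_DISPLAY_NAME_MAPPINGS` names reflect the convention ComfyUI scans for.

```python
# Minimal, hypothetical sketch of the ComfyUI custom-node contract.
# Only the attribute and mapping names are the real convention; the node itself is illustrative.
class ExampleNode:
    @classmethod
    def INPUT_TYPES(cls):
        # Declares which inputs/widgets the UI should render for this node.
        return {"required": {"text": ("STRING", {"default": "hello"})}}

    RETURN_TYPES = ("STRING",)    # types of the output sockets
    FUNCTION = "run"              # name of the method called when the node executes
    CATEGORY = "KJNodes/example"  # menu placement (hypothetical)

    def run(self, text):
        # Results are returned as a tuple matching RETURN_TYPES.
        return (text.upper(),)


# ComfyUI imports the package and reads these module-level mappings to register the nodes.
NODE_CLASS_MAPPINGS = {"ExampleNode": ExampleNode}
NODE_DISPLAY_NAME_MAPPINGS = {"ExampleNode": "Example Node"}
```

In the package below, `generate_node_mappings(NODE_CONFIG)` simply produces these two dictionaries from the more compact `NODE_CONFIG` table.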
diff --git a/custom_nodes/ComfyUI-KJNodes-main/__init__.py b/custom_nodes/ComfyUI-KJNodes-main/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..07fac2888c0cddcbbe665ad3f4984216b87a0a7a --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/__init__.py @@ -0,0 +1,227 @@ +from .nodes.nodes import * +from .nodes.curve_nodes import * +from .nodes.batchcrop_nodes import * +from .nodes.audioscheduler_nodes import * +from .nodes.image_nodes import * +from .nodes.intrinsic_lora_nodes import * +from .nodes.mask_nodes import * +from .nodes.model_optimization_nodes import * +NODE_CONFIG = { + #constants + "BOOLConstant": {"class": BOOLConstant, "name": "BOOL Constant"}, + "INTConstant": {"class": INTConstant, "name": "INT Constant"}, + "FloatConstant": {"class": FloatConstant, "name": "Float Constant"}, + "StringConstant": {"class": StringConstant, "name": "String Constant"}, + "StringConstantMultiline": {"class": StringConstantMultiline, "name": "String Constant Multiline"}, + #conditioning + "ConditioningMultiCombine": {"class": ConditioningMultiCombine, "name": "Conditioning Multi Combine"}, + "ConditioningSetMaskAndCombine": {"class": ConditioningSetMaskAndCombine, "name": "ConditioningSetMaskAndCombine"}, + "ConditioningSetMaskAndCombine3": {"class": ConditioningSetMaskAndCombine3, "name": "ConditioningSetMaskAndCombine3"}, + "ConditioningSetMaskAndCombine4": {"class": ConditioningSetMaskAndCombine4, "name": "ConditioningSetMaskAndCombine4"}, + "ConditioningSetMaskAndCombine5": {"class": ConditioningSetMaskAndCombine5, "name": "ConditioningSetMaskAndCombine5"}, + "CondPassThrough": {"class": CondPassThrough}, + #masking + "DownloadAndLoadCLIPSeg": {"class": DownloadAndLoadCLIPSeg, "name": "(Down)load CLIPSeg"}, + "BatchCLIPSeg": {"class": BatchCLIPSeg, "name": "Batch CLIPSeg"}, + "ColorToMask": {"class": ColorToMask, "name": "Color To Mask"}, + "CreateGradientMask": {"class": CreateGradientMask, "name": "Create Gradient Mask"}, + "CreateTextMask": {"class": CreateTextMask, "name": "Create Text Mask"}, + "CreateAudioMask": {"class": CreateAudioMask, "name": "Create Audio Mask"}, + "CreateFadeMask": {"class": CreateFadeMask, "name": "Create Fade Mask"}, + "CreateFadeMaskAdvanced": {"class": CreateFadeMaskAdvanced, "name": "Create Fade Mask Advanced"}, + "CreateFluidMask": {"class": CreateFluidMask, "name": "Create Fluid Mask"}, + "CreateShapeMask": {"class": CreateShapeMask, "name": "Create Shape Mask"}, + "CreateVoronoiMask": {"class": CreateVoronoiMask, "name": "Create Voronoi Mask"}, + "CreateMagicMask": {"class": CreateMagicMask, "name": "Create Magic Mask"}, + "GetMaskSizeAndCount": {"class": GetMaskSizeAndCount, "name": "Get Mask Size & Count"}, + "GrowMaskWithBlur": {"class": GrowMaskWithBlur, "name": "Grow Mask With Blur"}, + "MaskBatchMulti": {"class": MaskBatchMulti, "name": "Mask Batch Multi"}, + "OffsetMask": {"class": OffsetMask, "name": "Offset Mask"}, + "RemapMaskRange": {"class": RemapMaskRange, "name": "Remap Mask Range"}, + "ResizeMask": {"class": ResizeMask, "name": "Resize Mask"}, + "RoundMask": {"class": RoundMask, "name": "Round Mask"}, + "SeparateMasks": {"class": SeparateMasks, "name": "Separate Masks"}, + #images + "AddLabel": {"class": AddLabel, "name": "Add Label"}, + "ColorMatch": {"class": ColorMatch, "name": "Color Match"}, + "ImageTensorList": {"class": ImageTensorList, "name": "Image Tensor List"}, + "CrossFadeImages": {"class": CrossFadeImages, "name": "Cross Fade Images"}, + "CrossFadeImagesMulti": {"class": CrossFadeImagesMulti, 
"name": "Cross Fade Images Multi"}, + "GetImagesFromBatchIndexed": {"class": GetImagesFromBatchIndexed, "name": "Get Images From Batch Indexed"}, + "GetImageRangeFromBatch": {"class": GetImageRangeFromBatch, "name": "Get Image or Mask Range From Batch"}, + "GetLatentRangeFromBatch": {"class": GetLatentRangeFromBatch, "name": "Get Latent Range From Batch"}, + "GetImageSizeAndCount": {"class": GetImageSizeAndCount, "name": "Get Image Size & Count"}, + "FastPreview": {"class": FastPreview, "name": "Fast Preview"}, + "ImageAndMaskPreview": {"class": ImageAndMaskPreview}, + "ImageAddMulti": {"class": ImageAddMulti, "name": "Image Add Multi"}, + "ImageBatchMulti": {"class": ImageBatchMulti, "name": "Image Batch Multi"}, + "ImageBatchRepeatInterleaving": {"class": ImageBatchRepeatInterleaving}, + "ImageBatchTestPattern": {"class": ImageBatchTestPattern, "name": "Image Batch Test Pattern"}, + "ImageConcanate": {"class": ImageConcanate, "name": "Image Concatenate"}, + "ImageConcatFromBatch": {"class": ImageConcatFromBatch, "name": "Image Concatenate From Batch"}, + "ImageConcatMulti": {"class": ImageConcatMulti, "name": "Image Concatenate Multi"}, + "ImageCropByMask": {"class": ImageCropByMask, "name": "Image Crop By Mask"}, + "ImageCropByMaskAndResize": {"class": ImageCropByMaskAndResize, "name": "Image Crop By Mask And Resize"}, + "ImageCropByMaskBatch": {"class": ImageCropByMaskBatch, "name": "Image Crop By Mask Batch"}, + "ImageUncropByMask": {"class": ImageUncropByMask, "name": "Image Uncrop By Mask"}, + "ImageGrabPIL": {"class": ImageGrabPIL, "name": "Image Grab PIL"}, + "ImageGridComposite2x2": {"class": ImageGridComposite2x2, "name": "Image Grid Composite 2x2"}, + "ImageGridComposite3x3": {"class": ImageGridComposite3x3, "name": "Image Grid Composite 3x3"}, + "ImageGridtoBatch": {"class": ImageGridtoBatch, "name": "Image Grid To Batch"}, + "ImageNoiseAugmentation": {"class": ImageNoiseAugmentation, "name": "Image Noise Augmentation"}, + "ImageNormalize_Neg1_To_1": {"class": ImageNormalize_Neg1_To_1, "name": "Image Normalize -1 to 1"}, + "ImagePass": {"class": ImagePass}, + "ImagePadKJ": {"class": ImagePadKJ, "name": "ImagePad KJ"}, + "ImagePadForOutpaintMasked": {"class": ImagePadForOutpaintMasked, "name": "Image Pad For Outpaint Masked"}, + "ImagePadForOutpaintTargetSize": {"class": ImagePadForOutpaintTargetSize, "name": "Image Pad For Outpaint Target Size"}, + "ImagePrepForICLora": {"class": ImagePrepForICLora, "name": "Image Prep For ICLora"}, + "ImageResizeKJ": {"class": ImageResizeKJ, "name": "Resize Image"}, + "ImageUpscaleWithModelBatched": {"class": ImageUpscaleWithModelBatched, "name": "Image Upscale With Model Batched"}, + "InsertImagesToBatchIndexed": {"class": InsertImagesToBatchIndexed, "name": "Insert Images To Batch Indexed"}, + "InsertLatentToIndexed": {"class": InsertLatentToIndex, "name": "Insert Latent To Index"}, + "LoadAndResizeImage": {"class": LoadAndResizeImage, "name": "Load & Resize Image"}, + "LoadImagesFromFolderKJ": {"class": LoadImagesFromFolderKJ, "name": "Load Images From Folder (KJ)"}, + "MergeImageChannels": {"class": MergeImageChannels, "name": "Merge Image Channels"}, + "PreviewAnimation": {"class": PreviewAnimation, "name": "Preview Animation"}, + "RemapImageRange": {"class": RemapImageRange, "name": "Remap Image Range"}, + "ReverseImageBatch": {"class": ReverseImageBatch, "name": "Reverse Image Batch"}, + "ReplaceImagesInBatch": {"class": ReplaceImagesInBatch, "name": "Replace Images In Batch"}, + "SaveImageWithAlpha": {"class": SaveImageWithAlpha, 
"name": "Save Image With Alpha"}, + "SaveImageKJ": {"class": SaveImageKJ, "name": "Save Image KJ"}, + "ShuffleImageBatch": {"class": ShuffleImageBatch, "name": "Shuffle Image Batch"}, + "SplitImageChannels": {"class": SplitImageChannels, "name": "Split Image Channels"}, + "TransitionImagesMulti": {"class": TransitionImagesMulti, "name": "Transition Images Multi"}, + "TransitionImagesInBatch": {"class": TransitionImagesInBatch, "name": "Transition Images In Batch"}, + #batch cropping + "BatchCropFromMask": {"class": BatchCropFromMask, "name": "Batch Crop From Mask"}, + "BatchCropFromMaskAdvanced": {"class": BatchCropFromMaskAdvanced, "name": "Batch Crop From Mask Advanced"}, + "FilterZeroMasksAndCorrespondingImages": {"class": FilterZeroMasksAndCorrespondingImages}, + "InsertImageBatchByIndexes": {"class": InsertImageBatchByIndexes, "name": "Insert Image Batch By Indexes"}, + "BatchUncrop": {"class": BatchUncrop, "name": "Batch Uncrop"}, + "BatchUncropAdvanced": {"class": BatchUncropAdvanced, "name": "Batch Uncrop Advanced"}, + "SplitBboxes": {"class": SplitBboxes, "name": "Split Bboxes"}, + "BboxToInt": {"class": BboxToInt, "name": "Bbox To Int"}, + "BboxVisualize": {"class": BboxVisualize, "name": "Bbox Visualize"}, + #noise + "GenerateNoise": {"class": GenerateNoise, "name": "Generate Noise"}, + "FlipSigmasAdjusted": {"class": FlipSigmasAdjusted, "name": "Flip Sigmas Adjusted"}, + "InjectNoiseToLatent": {"class": InjectNoiseToLatent, "name": "Inject Noise To Latent"}, + "CustomSigmas": {"class": CustomSigmas, "name": "Custom Sigmas"}, + #utility + "StringToFloatList": {"class": StringToFloatList, "name": "String to Float List"}, + "WidgetToString": {"class": WidgetToString, "name": "Widget To String"}, + "SaveStringKJ": {"class": SaveStringKJ, "name": "Save String KJ"}, + "DummyOut": {"class": DummyOut, "name": "Dummy Out"}, + "GetLatentsFromBatchIndexed": {"class": GetLatentsFromBatchIndexed, "name": "Get Latents From Batch Indexed"}, + "ScaleBatchPromptSchedule": {"class": ScaleBatchPromptSchedule, "name": "Scale Batch Prompt Schedule"}, + "CameraPoseVisualizer": {"class": CameraPoseVisualizer, "name": "Camera Pose Visualizer"}, + "AppendStringsToList": {"class": AppendStringsToList, "name": "Append Strings To List"}, + "JoinStrings": {"class": JoinStrings, "name": "Join Strings"}, + "JoinStringMulti": {"class": JoinStringMulti, "name": "Join String Multi"}, + "SomethingToString": {"class": SomethingToString, "name": "Something To String"}, + "Sleep": {"class": Sleep, "name": "Sleep"}, + "VRAM_Debug": {"class": VRAM_Debug, "name": "VRAM Debug"}, + "SomethingToString": {"class": SomethingToString, "name": "Something To String"}, + "EmptyLatentImagePresets": {"class": EmptyLatentImagePresets, "name": "Empty Latent Image Presets"}, + "EmptyLatentImageCustomPresets": {"class": EmptyLatentImageCustomPresets, "name": "Empty Latent Image Custom Presets"}, + "ModelPassThrough": {"class": ModelPassThrough, "name": "ModelPass"}, + "ModelSaveKJ": {"class": ModelSaveKJ, "name": "Model Save KJ"}, + "SetShakkerLabsUnionControlNetType": {"class": SetShakkerLabsUnionControlNetType, "name": "Set Shakker Labs Union ControlNet Type"}, + "StyleModelApplyAdvanced": {"class": StyleModelApplyAdvanced, "name": "Style Model Apply Advanced"}, + #audioscheduler stuff + "NormalizedAmplitudeToMask": {"class": NormalizedAmplitudeToMask}, + "NormalizedAmplitudeToFloatList": {"class": NormalizedAmplitudeToFloatList}, + "OffsetMaskByNormalizedAmplitude": {"class": OffsetMaskByNormalizedAmplitude}, + 
"ImageTransformByNormalizedAmplitude": {"class": ImageTransformByNormalizedAmplitude}, + "AudioConcatenate": {"class": AudioConcatenate}, + #curve nodes + "SplineEditor": {"class": SplineEditor, "name": "Spline Editor"}, + "CreateShapeImageOnPath": {"class": CreateShapeImageOnPath, "name": "Create Shape Image On Path"}, + "CreateShapeMaskOnPath": {"class": CreateShapeMaskOnPath, "name": "Create Shape Mask On Path"}, + "CreateTextOnPath": {"class": CreateTextOnPath, "name": "Create Text On Path"}, + "CreateGradientFromCoords": {"class": CreateGradientFromCoords, "name": "Create Gradient From Coords"}, + "CutAndDragOnPath": {"class": CutAndDragOnPath, "name": "Cut And Drag On Path"}, + "GradientToFloat": {"class": GradientToFloat, "name": "Gradient To Float"}, + "WeightScheduleExtend": {"class": WeightScheduleExtend, "name": "Weight Schedule Extend"}, + "MaskOrImageToWeight": {"class": MaskOrImageToWeight, "name": "Mask Or Image To Weight"}, + "WeightScheduleConvert": {"class": WeightScheduleConvert, "name": "Weight Schedule Convert"}, + "FloatToMask": {"class": FloatToMask, "name": "Float To Mask"}, + "FloatToSigmas": {"class": FloatToSigmas, "name": "Float To Sigmas"}, + "SigmasToFloat": {"class": SigmasToFloat, "name": "Sigmas To Float"}, + "PlotCoordinates": {"class": PlotCoordinates, "name": "Plot Coordinates"}, + "InterpolateCoords": {"class": InterpolateCoords, "name": "Interpolate Coords"}, + "PointsEditor": {"class": PointsEditor, "name": "Points Editor"}, + #experimental + "StabilityAPI_SD3": {"class": StabilityAPI_SD3, "name": "Stability API SD3"}, + "SoundReactive": {"class": SoundReactive, "name": "Sound Reactive"}, + "StableZero123_BatchSchedule": {"class": StableZero123_BatchSchedule, "name": "Stable Zero123 Batch Schedule"}, + "SV3D_BatchSchedule": {"class": SV3D_BatchSchedule, "name": "SV3D Batch Schedule"}, + "LoadResAdapterNormalization": {"class": LoadResAdapterNormalization}, + "Superprompt": {"class": Superprompt, "name": "Superprompt"}, + "GLIGENTextBoxApplyBatchCoords": {"class": GLIGENTextBoxApplyBatchCoords}, + "Intrinsic_lora_sampling": {"class": Intrinsic_lora_sampling, "name": "Intrinsic Lora Sampling"}, + "CheckpointPerturbWeights": {"class": CheckpointPerturbWeights, "name": "CheckpointPerturbWeights"}, + "Screencap_mss": {"class": Screencap_mss, "name": "Screencap mss"}, + "WebcamCaptureCV2": {"class": WebcamCaptureCV2, "name": "Webcam Capture CV2"}, + "DifferentialDiffusionAdvanced": {"class": DifferentialDiffusionAdvanced, "name": "Differential Diffusion Advanced"}, + "FluxBlockLoraLoader": {"class": FluxBlockLoraLoader, "name": "Flux Block Lora Loader"}, + "FluxBlockLoraSelect": {"class": FluxBlockLoraSelect, "name": "Flux Block Lora Select"}, + "HunyuanVideoBlockLoraSelect": {"class": HunyuanVideoBlockLoraSelect, "name": "Hunyuan Video Block Lora Select"}, + "CustomControlNetWeightsFluxFromList": {"class": CustomControlNetWeightsFluxFromList, "name": "Custom ControlNet Weights Flux From List"}, + "CheckpointLoaderKJ": {"class": CheckpointLoaderKJ, "name": "CheckpointLoaderKJ"}, + "DiffusionModelLoaderKJ": {"class": DiffusionModelLoaderKJ, "name": "Diffusion Model Loader KJ"}, + "TorchCompileModelFluxAdvanced": {"class": TorchCompileModelFluxAdvanced, "name": "TorchCompileModelFluxAdvanced"}, + "TorchCompileModelHyVideo": {"class": TorchCompileModelHyVideo, "name": "TorchCompileModelHyVideo"}, + "TorchCompileVAE": {"class": TorchCompileVAE, "name": "TorchCompileVAE"}, + "TorchCompileControlNet": {"class": TorchCompileControlNet, "name": 
"TorchCompileControlNet"}, + "PatchModelPatcherOrder": {"class": PatchModelPatcherOrder, "name": "Patch Model Patcher Order"}, + "TorchCompileLTXModel": {"class": TorchCompileLTXModel, "name": "TorchCompileLTXModel"}, + "TorchCompileCosmosModel": {"class": TorchCompileCosmosModel, "name": "TorchCompileCosmosModel"}, + "TorchCompileModelWanVideo": {"class": TorchCompileModelWanVideo, "name": "TorchCompileModelWanVideo"}, + "PathchSageAttentionKJ": {"class": PathchSageAttentionKJ, "name": "Patch Sage Attention KJ"}, + "LeapfusionHunyuanI2VPatcher": {"class": LeapfusionHunyuanI2V, "name": "Leapfusion Hunyuan I2V Patcher"}, + "VAELoaderKJ": {"class": VAELoaderKJ, "name": "VAELoader KJ"}, + "ScheduledCFGGuidance": {"class": ScheduledCFGGuidance, "name": "Scheduled CFG Guidance"}, + "ApplyRifleXRoPE_HunuyanVideo": {"class": ApplyRifleXRoPE_HunuyanVideo, "name": "Apply RifleXRoPE HunuyanVideo"}, + "ApplyRifleXRoPE_WanVideo": {"class": ApplyRifleXRoPE_WanVideo, "name": "Apply RifleXRoPE WanVideo"}, + "WanVideoTeaCacheKJ": {"class": WanVideoTeaCacheKJ, "name": "WanVideo Tea Cache (native)"}, + "WanVideoEnhanceAVideoKJ": {"class": WanVideoEnhanceAVideoKJ, "name": "WanVideo Enhance A Video (native)"}, + "SkipLayerGuidanceWanVideo": {"class": SkipLayerGuidanceWanVideo, "name": "Skip Layer Guidance WanVideo"}, + "TimerNodeKJ": {"class": TimerNodeKJ, "name": "Timer Node KJ"}, + "HunyuanVideoEncodeKeyframesToCond": {"class": HunyuanVideoEncodeKeyframesToCond, "name": "HunyuanVideo Encode Keyframes To Cond"}, + + #instance diffusion + "CreateInstanceDiffusionTracking": {"class": CreateInstanceDiffusionTracking}, + "AppendInstanceDiffusionTracking": {"class": AppendInstanceDiffusionTracking}, + "DrawInstanceDiffusionTracking": {"class": DrawInstanceDiffusionTracking}, +} + +def generate_node_mappings(node_config): + node_class_mappings = {} + node_display_name_mappings = {} + + for node_name, node_info in node_config.items(): + node_class_mappings[node_name] = node_info["class"] + node_display_name_mappings[node_name] = node_info.get("name", node_info["class"].__name__) + + return node_class_mappings, node_display_name_mappings + +NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS = generate_node_mappings(NODE_CONFIG) + +__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"] + +WEB_DIRECTORY = "./web" + +from aiohttp import web +from server import PromptServer +from pathlib import Path + +if hasattr(PromptServer, "instance"): + try: + # NOTE: we add an extra static path to avoid comfy mechanism + # that loads every script in web. 
+ PromptServer.instance.app.add_routes( + [web.static("/kjweb_async", (Path(__file__).parent.absolute() / "kjweb_async").as_posix())] + ) + except: + pass \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/config.json b/custom_nodes/ComfyUI-KJNodes-main/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e44b55685c9f4e723dc50f6d854e29acc4ebafa6 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/config.json @@ -0,0 +1,3 @@ +{ + "sai_api_key": "your_api_key_here" +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/custom_dimensions_example.json b/custom_nodes/ComfyUI-KJNodes-main/custom_dimensions_example.json new file mode 100644 index 0000000000000000000000000000000000000000..5c4814d377d44916a14b4d4a83b7cba72ae2958b --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/custom_dimensions_example.json @@ -0,0 +1,22 @@ +[ + { + "label": "SD", + "value": "512x512" + }, + { + "label": "HD", + "value": "768x768" + }, + { + "label": "Full HD", + "value": "1024x1024" + }, + { + "label": "4k", + "value": "2048x2048" + }, + { + "label": "SVD", + "value": "1024x576" + } +] diff --git a/custom_nodes/ComfyUI-KJNodes-main/docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png b/custom_nodes/ComfyUI-KJNodes-main/docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png new file mode 100644 index 0000000000000000000000000000000000000000..e749239c1c4ffd5ab29b51695dd8d8b51ed3597f Binary files /dev/null and b/custom_nodes/ComfyUI-KJNodes-main/docs/images/319121566-05f66385-7568-4b1f-8bbc-11053660b02f.png differ diff --git a/custom_nodes/ComfyUI-KJNodes-main/docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png b/custom_nodes/ComfyUI-KJNodes-main/docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png new file mode 100644 index 0000000000000000000000000000000000000000..b53ad666ff060d87971f3962e74101f0cb2a5c3f Binary files /dev/null and b/custom_nodes/ComfyUI-KJNodes-main/docs/images/319121636-706b5081-9120-4a29-bd76-901691ada688.png differ diff --git a/custom_nodes/ComfyUI-KJNodes-main/example_workflows/leapfusion_hunyuuanvideo_i2v_native_testing.json b/custom_nodes/ComfyUI-KJNodes-main/example_workflows/leapfusion_hunyuuanvideo_i2v_native_testing.json new file mode 100644 index 0000000000000000000000000000000000000000..134a83788815d04b5574a807db67eb6e45bf9263 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/example_workflows/leapfusion_hunyuuanvideo_i2v_native_testing.json @@ -0,0 +1,1188 @@ +{ + "last_node_id": 86, + "last_link_id": 144, + "nodes": [ + { + "id": 62, + "type": "FluxGuidance", + "pos": [ + -630, + -170 + ], + "size": [ + 317.4000244140625, + 58 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 82 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 83 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "FluxGuidance" + }, + "widgets_values": [ + 6 + ] + }, + { + "id": 51, + "type": "KSamplerSelect", + "pos": [ + -610, + -480 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "SAMPLER", + "type": "SAMPLER", + "links": [ + 61 + ] + } + ], + "properties": { + "Node name for S&R": "KSamplerSelect" + }, + "widgets_values": [ + "euler" + ] + }, + { + "id": 57, + "type": "VAEDecodeTiled", + "pos": [ + -200, + 90 + ], + "size": [ + 315, + 150 + ], + "flags": {}, + "order": 20, + 
"mode": 0, + "inputs": [ + { + "name": "samples", + "type": "LATENT", + "link": 142 + }, + { + "name": "vae", + "type": "VAE", + "link": 74 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 105 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEDecodeTiled" + }, + "widgets_values": [ + 128, + 64, + 64, + 8 + ] + }, + { + "id": 65, + "type": "LoadImage", + "pos": [ + -2212.498779296875, + -632.4085083007812 + ], + "size": [ + 315, + 314 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 86 + ], + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "Mona-Lisa-oil-wood-panel-Leonardo-da.webp", + "image" + ] + }, + { + "id": 64, + "type": "VAEEncode", + "pos": [ + -1336.7884521484375, + -492.5806884765625 + ], + "size": [ + 210, + 46 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "pixels", + "type": "IMAGE", + "link": 144 + }, + { + "name": "vae", + "type": "VAE", + "link": 88 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 137 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAEEncode" + }, + "widgets_values": [] + }, + { + "id": 44, + "type": "UNETLoader", + "pos": [ + -2373.55029296875, + -193.91510009765625 + ], + "size": [ + 459.56060791015625, + 82 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 135 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "UNETLoader" + }, + "widgets_values": [ + "hyvideo\\hunyuan_video_720_fp8_e4m3fn.safetensors", + "fp8_e4m3fn_fast" + ] + }, + { + "id": 49, + "type": "VAELoader", + "pos": [ + -1876.39306640625, + -35.19633865356445 + ], + "size": [ + 433.7603454589844, + 58.71116256713867 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "VAE", + "type": "VAE", + "links": [ + 74, + 88 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "VAELoader" + }, + "widgets_values": [ + "hyvid\\hunyuan_video_vae_bf16.safetensors" + ] + }, + { + "id": 47, + "type": "DualCLIPLoader", + "pos": [ + -2284.893798828125, + 150.4042205810547 + ], + "size": [ + 343.3958435058594, + 106.86042785644531 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 56 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "DualCLIPLoader" + }, + "widgets_values": [ + "clip_l.safetensors", + "llava_llama3_fp16.safetensors", + "hunyuan_video", + "default" + ] + }, + { + "id": 45, + "type": "CLIPTextEncode", + "pos": [ + -1839.1649169921875, + 143.5203094482422 + ], + "size": [ + 400, + 200 + ], + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 56 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 69, + 82 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CLIPTextEncode" + }, + "widgets_values": [ + "woman puts on sunglasses" + ] + }, + { + "id": 53, + "type": "EmptyHunyuanLatentVideo", + "pos": [ + -1120, + 90 + ], + "size": [ + 315, + 130 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "width", + "type": "INT", + "link": 89, + "widget": { 
+ "name": "width" + } + }, + { + "name": "height", + "type": "INT", + "link": 90, + "widget": { + "name": "height" + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 119 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "EmptyHunyuanLatentVideo" + }, + "widgets_values": [ + 960, + 544, + 65, + 1 + ] + }, + { + "id": 55, + "type": "ConditioningZeroOut", + "pos": [ + -910, + 300 + ], + "size": [ + 251.14309692382812, + 26 + ], + "flags": { + "collapsed": true + }, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "link": 69 + } + ], + "outputs": [ + { + "name": "CONDITIONING", + "type": "CONDITIONING", + "links": [ + 70 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ConditioningZeroOut" + }, + "widgets_values": [] + }, + { + "id": 52, + "type": "BasicScheduler", + "pos": [ + -600, + -350 + ], + "size": [ + 315, + 106 + ], + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 78 + } + ], + "outputs": [ + { + "name": "SIGMAS", + "type": "SIGMAS", + "links": [ + 62 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "BasicScheduler" + }, + "widgets_values": [ + "simple", + 20, + 1 + ] + }, + { + "id": 42, + "type": "SamplerCustom", + "pos": [ + -640, + 10 + ], + "size": [ + 355.20001220703125, + 467.4666748046875 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 77 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 83 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 70 + }, + { + "name": "sampler", + "type": "SAMPLER", + "link": 61 + }, + { + "name": "sigmas", + "type": "SIGMAS", + "link": 62 + }, + { + "name": "latent_image", + "type": "LATENT", + "link": 119 + } + ], + "outputs": [ + { + "name": "output", + "type": "LATENT", + "links": null + }, + { + "name": "denoised_output", + "type": "LATENT", + "links": [ + 141 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "SamplerCustom" + }, + "widgets_values": [ + true, + 6, + "fixed", + 1, + null + ] + }, + { + "id": 84, + "type": "GetLatentRangeFromBatch", + "pos": [ + -240, + -100 + ], + "size": [ + 340.20001220703125, + 82 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "latents", + "type": "LATENT", + "link": 141 + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 142 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "GetLatentRangeFromBatch" + }, + "widgets_values": [ + 1, + -1 + ] + }, + { + "id": 50, + "type": "VHS_VideoCombine", + "pos": [ + 165.77645874023438, + -619.0606079101562 + ], + "size": [ + 1112.6898193359375, + 1076.4598388671875 + ], + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 105 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 24, + "loop_count": 0, + "filename_prefix": "hyvidcomfy", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + 
"save_metadata": true, + "trim_to_audio": false, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "hyvidcomfy_00001.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 24, + "workflow": "hyvidcomfy_00001.png", + "fullpath": "N:\\AI\\ComfyUI\\temp\\hyvidcomfy_00001.mp4" + }, + "muted": false + } + } + }, + { + "id": 54, + "type": "ModelSamplingSD3", + "pos": [ + -1079.9112548828125, + -146.69448852539062 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 117 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 77, + 78 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ModelSamplingSD3" + }, + "widgets_values": [ + 9 + ] + }, + { + "id": 80, + "type": "PathchSageAttentionKJ", + "pos": [ + -2273.926513671875, + -36.720542907714844 + ], + "size": [ + 315, + 58 + ], + "flags": {}, + "order": 7, + "mode": 4, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 135 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 136 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "PathchSageAttentionKJ" + }, + "widgets_values": [ + "auto" + ] + }, + { + "id": 85, + "type": "Note", + "pos": [ + -1838.572265625, + -302.1575927734375 + ], + "size": [ + 408.4594421386719, + 58 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [], + "outputs": [], + "properties": {}, + "widgets_values": [ + "https://huggingface.co/Kijai/Leapfusion-image2vid-comfy/blob/main/leapfusion_img2vid544p_comfy.safetensors" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 74, + "type": "LeapfusionHunyuanI2VPatcher", + "pos": [ + -1059.552978515625, + -459.34674072265625 + ], + "size": [ + 277.3238525390625, + 150 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 123 + }, + { + "name": "latent", + "type": "LATENT", + "link": 137 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 117 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "LeapfusionHunyuanI2VPatcher" + }, + "widgets_values": [ + 0, + 0, + 1, + 0.8 + ] + }, + { + "id": 59, + "type": "LoraLoaderModelOnly", + "pos": [ + -1870.3748779296875, + -194.6091766357422 + ], + "size": [ + 442.8438720703125, + 82 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "MODEL", + "link": 136 + } + ], + "outputs": [ + { + "name": "MODEL", + "type": "MODEL", + "links": [ + 123 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "LoraLoaderModelOnly" + }, + "widgets_values": [ + "hyvid\\musubi-tuner\\img2vid544p.safetensors", + 1 + ] + }, + { + "id": 66, + "type": "ImageResizeKJ", + "pos": [ + -1821.1531982421875, + -632.925048828125 + ], + "size": [ + 315, + 266 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 86 + }, + { + "name": "get_image_size", + "type": "IMAGE", + "link": null, + "shape": 7 + }, + { + "name": "width_input", + "type": "INT", + "link": null, + "widget": { + "name": "width_input" + }, + "shape": 7 + }, + { + "name": "height_input", + "type": "INT", + "link": null, + "widget": { + "name": "height_input" + }, + "shape": 7 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": 
[ + 143 + ], + "slot_index": 0 + }, + { + "name": "width", + "type": "INT", + "links": [ + 89 + ], + "slot_index": 1 + }, + { + "name": "height", + "type": "INT", + "links": [ + 90 + ], + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "ImageResizeKJ" + }, + "widgets_values": [ + 960, + 640, + "lanczos", + false, + 2, + 0, + 0, + "center" + ] + }, + { + "id": 86, + "type": "ImageNoiseAugmentation", + "pos": [ + -1361.111572265625, + -667.0104370117188 + ], + "size": [ + 315, + 106 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 143 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 144 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageNoiseAugmentation" + }, + "widgets_values": [ + 0.05, + 123, + "fixed" + ] + } + ], + "links": [ + [ + 56, + 47, + 0, + 45, + 0, + "CLIP" + ], + [ + 61, + 51, + 0, + 42, + 3, + "SAMPLER" + ], + [ + 62, + 52, + 0, + 42, + 4, + "SIGMAS" + ], + [ + 69, + 45, + 0, + 55, + 0, + "CONDITIONING" + ], + [ + 70, + 55, + 0, + 42, + 2, + "CONDITIONING" + ], + [ + 74, + 49, + 0, + 57, + 1, + "VAE" + ], + [ + 77, + 54, + 0, + 42, + 0, + "MODEL" + ], + [ + 78, + 54, + 0, + 52, + 0, + "MODEL" + ], + [ + 82, + 45, + 0, + 62, + 0, + "CONDITIONING" + ], + [ + 83, + 62, + 0, + 42, + 1, + "CONDITIONING" + ], + [ + 86, + 65, + 0, + 66, + 0, + "IMAGE" + ], + [ + 88, + 49, + 0, + 64, + 1, + "VAE" + ], + [ + 89, + 66, + 1, + 53, + 0, + "INT" + ], + [ + 90, + 66, + 2, + 53, + 1, + "INT" + ], + [ + 105, + 57, + 0, + 50, + 0, + "IMAGE" + ], + [ + 117, + 74, + 0, + 54, + 0, + "MODEL" + ], + [ + 119, + 53, + 0, + 42, + 5, + "LATENT" + ], + [ + 123, + 59, + 0, + 74, + 0, + "MODEL" + ], + [ + 135, + 44, + 0, + 80, + 0, + "MODEL" + ], + [ + 136, + 80, + 0, + 59, + 0, + "MODEL" + ], + [ + 137, + 64, + 0, + 74, + 1, + "LATENT" + ], + [ + 141, + 42, + 1, + 84, + 0, + "LATENT" + ], + [ + 142, + 84, + 0, + 57, + 0, + "LATENT" + ], + [ + 143, + 66, + 0, + 86, + 0, + "IMAGE" + ], + [ + 144, + 86, + 0, + 64, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": { + "ds": { + "scale": 0.740024994425854, + "offset": [ + 2525.036093151529, + 802.59123935694 + ] + }, + "node_versions": { + "comfy-core": "0.3.13", + "ComfyUI-KJNodes": "a8aeef670b3f288303f956bf94385cb87978ea93", + "ComfyUI-VideoHelperSuite": "c47b10ca1798b4925ff5a5f07d80c51ca80a837d" + }, + "VHS_latentpreview": true, + "VHS_latentpreviewrate": 0 + }, + "version": 0.4 +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_albedo.safetensors b/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_albedo.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..3d84a8b75363549dff202eb2f2353e63d5245a04 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_albedo.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d897f04ff2bb452e29a8f2a3c5c3cd5c55e95f314242cd645fbbe24a5ac59961 +size 6416109 diff --git a/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_depth.safetensors b/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_depth.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..6048b84f9e5348240d84d1c0d24e96c9655032e2 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_depth.safetensors @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:f199d6bf3180fe7271073c3769dcb764b40f35f41b30fcb183ae5bf4b6a9997f +size 6416109 diff --git a/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_normal.safetensors b/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_normal.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..506b1dd0a3b9a07c423f6cda497fa6a196014c18 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_normal.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02934db0a0b92a9cdda402e42548560beda7d31b268e561dbc6815551e876268 +size 6416109 diff --git a/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_shading.safetensors b/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_shading.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..5b8bbfcf7926ac3ecefe84229ca6de2fc1b523eb --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_lora_sd15_shading.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:635e998063a10211633edd3e4b1676201822cd67f790ec71dba5f32d8b625c8b +size 6416109 diff --git a/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_loras.txt b/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_loras.txt new file mode 100644 index 0000000000000000000000000000000000000000..62ee933763a8aa9e1b232d228717ac754ab22751 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/intrinsic_loras/intrinsic_loras.txt @@ -0,0 +1,4 @@ +source for the loras: +https://github.com/duxiaodan/intrinsic-lora + +Renamed and conveted to .safetensors \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/kjweb_async/marked.min.js b/custom_nodes/ComfyUI-KJNodes-main/kjweb_async/marked.min.js new file mode 100644 index 0000000000000000000000000000000000000000..2e66c369c388c135cc68d399861a737f4c5e68cd --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/kjweb_async/marked.min.js @@ -0,0 +1,6 @@ +/** + * marked v12.0.1 - a markdown parser + * Copyright (c) 2011-2024, Christopher Jeffrey. 
(MIT Licensed) + * https://github.com/markedjs/marked + */ +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((e="undefined"!=typeof globalThis?globalThis:e||self).marked={})}(this,(function(e){"use strict";function t(){return{async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null}}function n(t){e.defaults=t}e.defaults={async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null};const s=/[&<>"']/,r=new RegExp(s.source,"g"),i=/[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/,l=new RegExp(i.source,"g"),o={"&":"&","<":"<",">":">",'"':""","'":"'"},a=e=>o[e];function c(e,t){if(t){if(s.test(e))return e.replace(r,a)}else if(i.test(e))return e.replace(l,a);return e}const h=/&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/gi;function p(e){return e.replace(h,((e,t)=>"colon"===(t=t.toLowerCase())?":":"#"===t.charAt(0)?"x"===t.charAt(1)?String.fromCharCode(parseInt(t.substring(2),16)):String.fromCharCode(+t.substring(1)):""))}const u=/(^|[^\[])\^/g;function k(e,t){let n="string"==typeof e?e:e.source;t=t||"";const s={replace:(e,t)=>{let r="string"==typeof t?t:t.source;return r=r.replace(u,"$1"),n=n.replace(e,r),s},getRegex:()=>new RegExp(n,t)};return s}function g(e){try{e=encodeURI(e).replace(/%25/g,"%")}catch(e){return null}return e}const f={exec:()=>null};function d(e,t){const n=e.replace(/\|/g,((e,t,n)=>{let s=!1,r=t;for(;--r>=0&&"\\"===n[r];)s=!s;return s?"|":" |"})).split(/ \|/);let s=0;if(n[0].trim()||n.shift(),n.length>0&&!n[n.length-1].trim()&&n.pop(),t)if(n.length>t)n.splice(t);else for(;n.length0)return{type:"space",raw:t[0]}}code(e){const t=this.rules.block.code.exec(e);if(t){const e=t[0].replace(/^ {1,4}/gm,"");return{type:"code",raw:t[0],codeBlockStyle:"indented",text:this.options.pedantic?e:x(e,"\n")}}}fences(e){const t=this.rules.block.fences.exec(e);if(t){const e=t[0],n=function(e,t){const n=e.match(/^(\s+)(?:```)/);if(null===n)return t;const s=n[1];return t.split("\n").map((e=>{const t=e.match(/^\s+/);if(null===t)return e;const[n]=t;return n.length>=s.length?e.slice(s.length):e})).join("\n")}(e,t[3]||"");return{type:"code",raw:e,lang:t[2]?t[2].trim().replace(this.rules.inline.anyPunctuation,"$1"):t[2],text:n}}}heading(e){const t=this.rules.block.heading.exec(e);if(t){let e=t[2].trim();if(/#$/.test(e)){const t=x(e,"#");this.options.pedantic?e=t.trim():t&&!/ $/.test(t)||(e=t.trim())}return{type:"heading",raw:t[0],depth:t[1].length,text:e,tokens:this.lexer.inline(e)}}}hr(e){const t=this.rules.block.hr.exec(e);if(t)return{type:"hr",raw:t[0]}}blockquote(e){const t=this.rules.block.blockquote.exec(e);if(t){const e=x(t[0].replace(/^ *>[ \t]?/gm,""),"\n"),n=this.lexer.state.top;this.lexer.state.top=!0;const s=this.lexer.blockTokens(e);return this.lexer.state.top=n,{type:"blockquote",raw:t[0],tokens:s,text:e}}}list(e){let t=this.rules.block.list.exec(e);if(t){let n=t[1].trim();const s=n.length>1,r={type:"list",raw:"",ordered:s,start:s?+n.slice(0,-1):"",loose:!1,items:[]};n=s?`\\d{1,9}\\${n.slice(-1)}`:`\\${n}`,this.options.pedantic&&(n=s?n:"[*+-]");const i=new RegExp(`^( {0,3}${n})((?:[\t ][^\\n]*)?(?:\\n|$))`);let l="",o="",a=!1;for(;e;){let n=!1;if(!(t=i.exec(e)))break;if(this.rules.block.hr.test(e))break;l=t[0],e=e.substring(l.length);let s=t[2].split("\n",1)[0].replace(/^\t+/,(e=>" 
".repeat(3*e.length))),c=e.split("\n",1)[0],h=0;this.options.pedantic?(h=2,o=s.trimStart()):(h=t[2].search(/[^ ]/),h=h>4?1:h,o=s.slice(h),h+=t[1].length);let p=!1;if(!s&&/^ *$/.test(c)&&(l+=c+"\n",e=e.substring(c.length+1),n=!0),!n){const t=new RegExp(`^ {0,${Math.min(3,h-1)}}(?:[*+-]|\\d{1,9}[.)])((?:[ \t][^\\n]*)?(?:\\n|$))`),n=new RegExp(`^ {0,${Math.min(3,h-1)}}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)`),r=new RegExp(`^ {0,${Math.min(3,h-1)}}(?:\`\`\`|~~~)`),i=new RegExp(`^ {0,${Math.min(3,h-1)}}#`);for(;e;){const a=e.split("\n",1)[0];if(c=a,this.options.pedantic&&(c=c.replace(/^ {1,4}(?=( {4})*[^ ])/g," ")),r.test(c))break;if(i.test(c))break;if(t.test(c))break;if(n.test(e))break;if(c.search(/[^ ]/)>=h||!c.trim())o+="\n"+c.slice(h);else{if(p)break;if(s.search(/[^ ]/)>=4)break;if(r.test(s))break;if(i.test(s))break;if(n.test(s))break;o+="\n"+c}p||c.trim()||(p=!0),l+=a+"\n",e=e.substring(a.length+1),s=c.slice(h)}}r.loose||(a?r.loose=!0:/\n *\n *$/.test(l)&&(a=!0));let u,k=null;this.options.gfm&&(k=/^\[[ xX]\] /.exec(o),k&&(u="[ ] "!==k[0],o=o.replace(/^\[[ xX]\] +/,""))),r.items.push({type:"list_item",raw:l,task:!!k,checked:u,loose:!1,text:o,tokens:[]}),r.raw+=l}r.items[r.items.length-1].raw=l.trimEnd(),r.items[r.items.length-1].text=o.trimEnd(),r.raw=r.raw.trimEnd();for(let e=0;e"space"===e.type)),n=t.length>0&&t.some((e=>/\n.*\n/.test(e.raw)));r.loose=n}if(r.loose)for(let e=0;e$/,"$1").replace(this.rules.inline.anyPunctuation,"$1"):"",s=t[3]?t[3].substring(1,t[3].length-1).replace(this.rules.inline.anyPunctuation,"$1"):t[3];return{type:"def",tag:e,raw:t[0],href:n,title:s}}}table(e){const t=this.rules.block.table.exec(e);if(!t)return;if(!/[:|]/.test(t[2]))return;const n=d(t[1]),s=t[2].replace(/^\||\| *$/g,"").split("|"),r=t[3]&&t[3].trim()?t[3].replace(/\n[ \t]*$/,"").split("\n"):[],i={type:"table",raw:t[0],header:[],align:[],rows:[]};if(n.length===s.length){for(const e of s)/^ *-+: *$/.test(e)?i.align.push("right"):/^ *:-+: *$/.test(e)?i.align.push("center"):/^ *:-+ *$/.test(e)?i.align.push("left"):i.align.push(null);for(const e of n)i.header.push({text:e,tokens:this.lexer.inline(e)});for(const e of r)i.rows.push(d(e,i.header.length).map((e=>({text:e,tokens:this.lexer.inline(e)}))));return i}}lheading(e){const t=this.rules.block.lheading.exec(e);if(t)return{type:"heading",raw:t[0],depth:"="===t[2].charAt(0)?1:2,text:t[1],tokens:this.lexer.inline(t[1])}}paragraph(e){const t=this.rules.block.paragraph.exec(e);if(t){const e="\n"===t[1].charAt(t[1].length-1)?t[1].slice(0,-1):t[1];return{type:"paragraph",raw:t[0],text:e,tokens:this.lexer.inline(e)}}}text(e){const t=this.rules.block.text.exec(e);if(t)return{type:"text",raw:t[0],text:t[0],tokens:this.lexer.inline(t[0])}}escape(e){const t=this.rules.inline.escape.exec(e);if(t)return{type:"escape",raw:t[0],text:c(t[1])}}tag(e){const t=this.rules.inline.tag.exec(e);if(t)return!this.lexer.state.inLink&&/^/i.test(t[0])&&(this.lexer.state.inLink=!1),!this.lexer.state.inRawBlock&&/^<(pre|code|kbd|script)(\s|>)/i.test(t[0])?this.lexer.state.inRawBlock=!0:this.lexer.state.inRawBlock&&/^<\/(pre|code|kbd|script)(\s|>)/i.test(t[0])&&(this.lexer.state.inRawBlock=!1),{type:"html",raw:t[0],inLink:this.lexer.state.inLink,inRawBlock:this.lexer.state.inRawBlock,block:!1,text:t[0]}}link(e){const t=this.rules.inline.link.exec(e);if(t){const e=t[2].trim();if(!this.options.pedantic&&/^$/.test(e))return;const t=x(e.slice(0,-1),"\\");if((e.length-t.length)%2==0)return}else{const e=function(e,t){if(-1===e.indexOf(t[1]))return-1;let n=0;for(let s=0;s-1){const 
n=(0===t[0].indexOf("!")?5:4)+t[1].length+e;t[2]=t[2].substring(0,e),t[0]=t[0].substring(0,n).trim(),t[3]=""}}let n=t[2],s="";if(this.options.pedantic){const e=/^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(n);e&&(n=e[1],s=e[3])}else s=t[3]?t[3].slice(1,-1):"";return n=n.trim(),/^$/.test(e)?n.slice(1):n.slice(1,-1)),b(t,{href:n?n.replace(this.rules.inline.anyPunctuation,"$1"):n,title:s?s.replace(this.rules.inline.anyPunctuation,"$1"):s},t[0],this.lexer)}}reflink(e,t){let n;if((n=this.rules.inline.reflink.exec(e))||(n=this.rules.inline.nolink.exec(e))){const e=t[(n[2]||n[1]).replace(/\s+/g," ").toLowerCase()];if(!e){const e=n[0].charAt(0);return{type:"text",raw:e,text:e}}return b(n,e,n[0],this.lexer)}}emStrong(e,t,n=""){let s=this.rules.inline.emStrongLDelim.exec(e);if(!s)return;if(s[3]&&n.match(/[\p{L}\p{N}]/u))return;if(!(s[1]||s[2]||"")||!n||this.rules.inline.punctuation.exec(n)){const n=[...s[0]].length-1;let r,i,l=n,o=0;const a="*"===s[0][0]?this.rules.inline.emStrongRDelimAst:this.rules.inline.emStrongRDelimUnd;for(a.lastIndex=0,t=t.slice(-1*e.length+n);null!=(s=a.exec(t));){if(r=s[1]||s[2]||s[3]||s[4]||s[5]||s[6],!r)continue;if(i=[...r].length,s[3]||s[4]){l+=i;continue}if((s[5]||s[6])&&n%3&&!((n+i)%3)){o+=i;continue}if(l-=i,l>0)continue;i=Math.min(i,i+l+o);const t=[...s[0]][0].length,a=e.slice(0,n+s.index+t+i);if(Math.min(n,i)%2){const e=a.slice(1,-1);return{type:"em",raw:a,text:e,tokens:this.lexer.inlineTokens(e)}}const c=a.slice(2,-2);return{type:"strong",raw:a,text:c,tokens:this.lexer.inlineTokens(c)}}}}codespan(e){const t=this.rules.inline.code.exec(e);if(t){let e=t[2].replace(/\n/g," ");const n=/[^ ]/.test(e),s=/^ /.test(e)&&/ $/.test(e);return n&&s&&(e=e.substring(1,e.length-1)),e=c(e,!0),{type:"codespan",raw:t[0],text:e}}}br(e){const t=this.rules.inline.br.exec(e);if(t)return{type:"br",raw:t[0]}}del(e){const t=this.rules.inline.del.exec(e);if(t)return{type:"del",raw:t[0],text:t[2],tokens:this.lexer.inlineTokens(t[2])}}autolink(e){const t=this.rules.inline.autolink.exec(e);if(t){let e,n;return"@"===t[2]?(e=c(t[1]),n="mailto:"+e):(e=c(t[1]),n=e),{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}url(e){let t;if(t=this.rules.inline.url.exec(e)){let e,n;if("@"===t[2])e=c(t[0]),n="mailto:"+e;else{let s;do{s=t[0],t[0]=this.rules.inline._backpedal.exec(t[0])?.[0]??""}while(s!==t[0]);e=c(t[0]),n="www."===t[1]?"http://"+t[0]:t[0]}return{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}inlineText(e){const t=this.rules.inline.text.exec(e);if(t){let e;return e=this.lexer.state.inRawBlock?t[0]:c(t[0]),{type:"text",raw:t[0],text:e}}}}const m=/^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/,y=/(?:[*+-]|\d{1,9}[.)])/,$=k(/^(?!bull |blockCode|fences|blockquote|heading|html)((?:.|\n(?!\s*?\n|bull |blockCode|fences|blockquote|heading|html))+?)\n {0,3}(=+|-+) *(?:\n+|$)/).replace(/bull/g,y).replace(/blockCode/g,/ {4}/).replace(/fences/g,/ {0,3}(?:`{3,}|~{3,})/).replace(/blockquote/g,/ {0,3}>/).replace(/heading/g,/ {0,3}#{1,6}/).replace(/html/g,/ {0,3}<[^\n>]+>\n/).getRegex(),z=/^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\n)[^\n]+)*)/,T=/(?!\s*\])(?:\\.|[^\[\]\\])+/,R=k(/^ {0,3}\[(label)\]: *(?:\n *)?([^<\s][^\s]*|<.*?>)(?:(?: +(?:\n *)?| *\n *)(title))? 
*(?:\n+|$)/).replace("label",T).replace("title",/(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/).getRegex(),_=k(/^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/).replace(/bull/g,y).getRegex(),A="address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|search|section|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul",S=/|$))/,I=k("^ {0,3}(?:<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:[^\\n]*\\n+|$)|comment[^\\n]*(\\n+|$)|<\\?[\\s\\S]*?(?:\\?>\\n*|$)|\\n*|$)|\\n*|$)|)[\\s\\S]*?(?:(?:\\n *)+\\n|$)|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)|(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$))","i").replace("comment",S).replace("tag",A).replace("attribute",/ +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/).getRegex(),E=k(z).replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("|table","").replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex(),q={blockquote:k(/^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/).replace("paragraph",E).getRegex(),code:/^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,def:R,fences:/^ {0,3}(`{3,}(?=[^`\n]*(?:\n|$))|~{3,})([^\n]*)(?:\n|$)(?:|([\s\S]*?)(?:\n|$))(?: {0,3}\1[~`]* *(?=\n|$)|$)/,heading:/^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,hr:m,html:I,lheading:$,list:_,newline:/^(?: *(?:\n|$))+/,paragraph:E,table:f,text:/^[^\n]+/},Z=k("^ *([^\\n ].*)\\n {0,3}((?:\\| *)?:?-+:? *(?:\\| *:?-+:? *)*(?:\\| *)?)(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)").replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("blockquote"," {0,3}>").replace("code"," {4}[^\\n]").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex(),L={...q,table:Z,paragraph:k(z).replace("hr",m).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("table",Z).replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|textarea|!--)").replace("tag",A).getRegex()},P={...q,html:k("^ *(?:comment *(?:\\n|\\s*$)|<(tag)[\\s\\S]+? *(?:\\n{2,}|\\s*$)|\\s]*)*?/?> *(?:\\n{2,}|\\s*$))").replace("comment",S).replace(/tag/g,"(?!(?:a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)\\b)\\w+(?!:|[^\\w\\s@]*@)\\b").getRegex(),def:/^ *\[([^\]]+)\]: *]+)>?(?: +(["(][^\n]+[")]))? 
*(?:\n+|$)/,heading:/^(#{1,6})(.*)(?:\n+|$)/,fences:f,lheading:/^(.+?)\n {0,3}(=+|-+) *(?:\n+|$)/,paragraph:k(z).replace("hr",m).replace("heading"," *#{1,6} *[^\n]").replace("lheading",$).replace("|table","").replace("blockquote"," {0,3}>").replace("|fences","").replace("|list","").replace("|html","").replace("|tag","").getRegex()},Q=/^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,v=/^( {2,}|\\)\n(?!\s*$)/,B="\\p{P}\\p{S}",C=k(/^((?![*_])[\spunctuation])/,"u").replace(/punctuation/g,B).getRegex(),M=k(/^(?:\*+(?:((?!\*)[punct])|[^\s*]))|^_+(?:((?!_)[punct])|([^\s_]))/,"u").replace(/punct/g,B).getRegex(),O=k("^[^_*]*?__[^_*]*?\\*[^_*]*?(?=__)|[^*]+(?=[^*])|(?!\\*)[punct](\\*+)(?=[\\s]|$)|[^punct\\s](\\*+)(?!\\*)(?=[punct\\s]|$)|(?!\\*)[punct\\s](\\*+)(?=[^punct\\s])|[\\s](\\*+)(?!\\*)(?=[punct])|(?!\\*)[punct](\\*+)(?!\\*)(?=[punct])|[^punct\\s](\\*+)(?=[^punct\\s])","gu").replace(/punct/g,B).getRegex(),D=k("^[^_*]*?\\*\\*[^_*]*?_[^_*]*?(?=\\*\\*)|[^_]+(?=[^_])|(?!_)[punct](_+)(?=[\\s]|$)|[^punct\\s](_+)(?!_)(?=[punct\\s]|$)|(?!_)[punct\\s](_+)(?=[^punct\\s])|[\\s](_+)(?!_)(?=[punct])|(?!_)[punct](_+)(?!_)(?=[punct])","gu").replace(/punct/g,B).getRegex(),j=k(/\\([punct])/,"gu").replace(/punct/g,B).getRegex(),H=k(/^<(scheme:[^\s\x00-\x1f<>]*|email)>/).replace("scheme",/[a-zA-Z][a-zA-Z0-9+.-]{1,31}/).replace("email",/[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/).getRegex(),U=k(S).replace("(?:--\x3e|$)","--\x3e").getRegex(),X=k("^comment|^|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>|^<\\?[\\s\\S]*?\\?>|^|^").replace("comment",U).replace("attribute",/\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/).getRegex(),F=/(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/,N=k(/^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/).replace("label",F).replace("href",/<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/).replace("title",/"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/).getRegex(),G=k(/^!?\[(label)\]\[(ref)\]/).replace("label",F).replace("ref",T).getRegex(),J=k(/^!?\[(ref)\](?:\[\])?/).replace("ref",T).getRegex(),K={_backpedal:f,anyPunctuation:j,autolink:H,blockSkip:/\[[^[\]]*?\]\([^\(\)]*?\)|`[^`]*?`|<[^<>]*?>/g,br:v,code:/^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,del:f,emStrongLDelim:M,emStrongRDelimAst:O,emStrongRDelimUnd:D,escape:Q,link:N,nolink:J,punctuation:C,reflink:G,reflinkSearch:k("reflink|nolink(?!\\()","g").replace("reflink",G).replace("nolink",J).getRegex(),tag:X,text:/^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\t+" ".repeat(n.length)));e;)if(!(this.options.extensions&&this.options.extensions.block&&this.options.extensions.block.some((s=>!!(n=s.call({lexer:this},e,t))&&(e=e.substring(n.raw.length),t.push(n),!0)))))if(n=this.tokenizer.space(e))e=e.substring(n.raw.length),1===n.raw.length&&t.length>0?t[t.length-1].raw+="\n":t.push(n);else if(n=this.tokenizer.code(e))e=e.substring(n.raw.length),s=t[t.length-1],!s||"paragraph"!==s.type&&"text"!==s.type?t.push(n):(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue[this.inlineQueue.length-1].src=s.text);else if(n=this.tokenizer.fences(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.heading(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.hr(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.blockquote(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.list(e))e=e.substring(n.raw.length),t.push(n);else 
if(n=this.tokenizer.html(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.def(e))e=e.substring(n.raw.length),s=t[t.length-1],!s||"paragraph"!==s.type&&"text"!==s.type?this.tokens.links[n.tag]||(this.tokens.links[n.tag]={href:n.href,title:n.title}):(s.raw+="\n"+n.raw,s.text+="\n"+n.raw,this.inlineQueue[this.inlineQueue.length-1].src=s.text);else if(n=this.tokenizer.table(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.lheading(e))e=e.substring(n.raw.length),t.push(n);else{if(r=e,this.options.extensions&&this.options.extensions.startBlock){let t=1/0;const n=e.slice(1);let s;this.options.extensions.startBlock.forEach((e=>{s=e.call({lexer:this},n),"number"==typeof s&&s>=0&&(t=Math.min(t,s))})),t<1/0&&t>=0&&(r=e.substring(0,t+1))}if(this.state.top&&(n=this.tokenizer.paragraph(r)))s=t[t.length-1],i&&"paragraph"===s.type?(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=s.text):t.push(n),i=r.length!==e.length,e=e.substring(n.raw.length);else if(n=this.tokenizer.text(e))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===s.type?(s.raw+="\n"+n.raw,s.text+="\n"+n.text,this.inlineQueue.pop(),this.inlineQueue[this.inlineQueue.length-1].src=s.text):t.push(n);else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}return this.state.top=!0,t}inline(e,t=[]){return this.inlineQueue.push({src:e,tokens:t}),t}inlineTokens(e,t=[]){let n,s,r,i,l,o,a=e;if(this.tokens.links){const e=Object.keys(this.tokens.links);if(e.length>0)for(;null!=(i=this.tokenizer.rules.inline.reflinkSearch.exec(a));)e.includes(i[0].slice(i[0].lastIndexOf("[")+1,-1))&&(a=a.slice(0,i.index)+"["+"a".repeat(i[0].length-2)+"]"+a.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex))}for(;null!=(i=this.tokenizer.rules.inline.blockSkip.exec(a));)a=a.slice(0,i.index)+"["+"a".repeat(i[0].length-2)+"]"+a.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);for(;null!=(i=this.tokenizer.rules.inline.anyPunctuation.exec(a));)a=a.slice(0,i.index)+"++"+a.slice(this.tokenizer.rules.inline.anyPunctuation.lastIndex);for(;e;)if(l||(o=""),l=!1,!(this.options.extensions&&this.options.extensions.inline&&this.options.extensions.inline.some((s=>!!(n=s.call({lexer:this},e,t))&&(e=e.substring(n.raw.length),t.push(n),!0)))))if(n=this.tokenizer.escape(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.tag(e))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===n.type&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(n=this.tokenizer.link(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.reflink(e,this.tokens.links))e=e.substring(n.raw.length),s=t[t.length-1],s&&"text"===n.type&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(n=this.tokenizer.emStrong(e,a,o))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.codespan(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.br(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.del(e))e=e.substring(n.raw.length),t.push(n);else if(n=this.tokenizer.autolink(e))e=e.substring(n.raw.length),t.push(n);else if(this.state.inLink||!(n=this.tokenizer.url(e))){if(r=e,this.options.extensions&&this.options.extensions.startInline){let t=1/0;const n=e.slice(1);let s;this.options.extensions.startInline.forEach((e=>{s=e.call({lexer:this},n),"number"==typeof 
s&&s>=0&&(t=Math.min(t,s))})),t<1/0&&t>=0&&(r=e.substring(0,t+1))}if(n=this.tokenizer.inlineText(r))e=e.substring(n.raw.length),"_"!==n.raw.slice(-1)&&(o=n.raw.slice(-1)),l=!0,s=t[t.length-1],s&&"text"===s.type?(s.raw+=n.raw,s.text+=n.text):t.push(n);else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}else e=e.substring(n.raw.length),t.push(n);return t}}class se{options;constructor(t){this.options=t||e.defaults}code(e,t,n){const s=(t||"").match(/^\S*/)?.[0];return e=e.replace(/\n$/,"")+"\n",s?'
'+(n?e:c(e,!0))+"
\n":"
"+(n?e:c(e,!0))+"
\n"}blockquote(e){return`
\n${e}
\n`}html(e,t){return e}heading(e,t,n){return`${e}\n`}hr(){return"
\n"}list(e,t,n){const s=t?"ol":"ul";return"<"+s+(t&&1!==n?' start="'+n+'"':"")+">\n"+e+"\n"}listitem(e,t,n){return`
  • ${e}
  • \n`}checkbox(e){return"'}paragraph(e){return`

    ${e}

    \n`}table(e,t){return t&&(t=`${t}`),"\n\n"+e+"\n"+t+"
    \n"}tablerow(e){return`\n${e}\n`}tablecell(e,t){const n=t.header?"th":"td";return(t.align?`<${n} align="${t.align}">`:`<${n}>`)+e+`\n`}strong(e){return`${e}`}em(e){return`${e}`}codespan(e){return`${e}`}br(){return"
    "}del(e){return`${e}`}link(e,t,n){const s=g(e);if(null===s)return n;let r='
    ",r}image(e,t,n){const s=g(e);if(null===s)return n;let r=`${n}0&&"paragraph"===n.tokens[0].type?(n.tokens[0].text=e+" "+n.tokens[0].text,n.tokens[0].tokens&&n.tokens[0].tokens.length>0&&"text"===n.tokens[0].tokens[0].type&&(n.tokens[0].tokens[0].text=e+" "+n.tokens[0].tokens[0].text)):n.tokens.unshift({type:"text",text:e+" "}):o+=e+" "}o+=this.parse(n.tokens,i),l+=this.renderer.listitem(o,r,!!s)}n+=this.renderer.list(l,t,s);continue}case"html":{const e=r;n+=this.renderer.html(e.text,e.block);continue}case"paragraph":{const e=r;n+=this.renderer.paragraph(this.parseInline(e.tokens));continue}case"text":{let i=r,l=i.tokens?this.parseInline(i.tokens):i.text;for(;s+1{const r=e[s].flat(1/0);n=n.concat(this.walkTokens(r,t))})):e.tokens&&(n=n.concat(this.walkTokens(e.tokens,t)))}}return n}use(...e){const t=this.defaults.extensions||{renderers:{},childTokens:{}};return e.forEach((e=>{const n={...e};if(n.async=this.defaults.async||n.async||!1,e.extensions&&(e.extensions.forEach((e=>{if(!e.name)throw new Error("extension name required");if("renderer"in e){const n=t.renderers[e.name];t.renderers[e.name]=n?function(...t){let s=e.renderer.apply(this,t);return!1===s&&(s=n.apply(this,t)),s}:e.renderer}if("tokenizer"in e){if(!e.level||"block"!==e.level&&"inline"!==e.level)throw new Error("extension level must be 'block' or 'inline'");const n=t[e.level];n?n.unshift(e.tokenizer):t[e.level]=[e.tokenizer],e.start&&("block"===e.level?t.startBlock?t.startBlock.push(e.start):t.startBlock=[e.start]:"inline"===e.level&&(t.startInline?t.startInline.push(e.start):t.startInline=[e.start]))}"childTokens"in e&&e.childTokens&&(t.childTokens[e.name]=e.childTokens)})),n.extensions=t),e.renderer){const t=this.defaults.renderer||new se(this.defaults);for(const n in e.renderer){if(!(n in t))throw new Error(`renderer '${n}' does not exist`);if("options"===n)continue;const s=n,r=e.renderer[s],i=t[s];t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n||""}}n.renderer=t}if(e.tokenizer){const t=this.defaults.tokenizer||new w(this.defaults);for(const n in e.tokenizer){if(!(n in t))throw new Error(`tokenizer '${n}' does not exist`);if(["options","rules","lexer"].includes(n))continue;const s=n,r=e.tokenizer[s],i=t[s];t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n}}n.tokenizer=t}if(e.hooks){const t=this.defaults.hooks||new le;for(const n in e.hooks){if(!(n in t))throw new Error(`hook '${n}' does not exist`);if("options"===n)continue;const s=n,r=e.hooks[s],i=t[s];le.passThroughHooks.has(n)?t[s]=e=>{if(this.defaults.async)return Promise.resolve(r.call(t,e)).then((e=>i.call(t,e)));const n=r.call(t,e);return i.call(t,n)}:t[s]=(...e)=>{let n=r.apply(t,e);return!1===n&&(n=i.apply(t,e)),n}}n.hooks=t}if(e.walkTokens){const t=this.defaults.walkTokens,s=e.walkTokens;n.walkTokens=function(e){let n=[];return n.push(s.call(this,e)),t&&(n=n.concat(t.call(this,e))),n}}this.defaults={...this.defaults,...n}})),this}setOptions(e){return this.defaults={...this.defaults,...e},this}lexer(e,t){return ne.lex(e,t??this.defaults)}parser(e,t){return ie.parse(e,t??this.defaults)}#e(e,t){return(n,s)=>{const r={...s},i={...this.defaults,...r};!0===this.defaults.async&&!1===r.async&&(i.silent||console.warn("marked(): The async option was set to true by an extension. 
The async: false option sent to parse will be ignored."),i.async=!0);const l=this.#t(!!i.silent,!!i.async);if(null==n)return l(new Error("marked(): input parameter is undefined or null"));if("string"!=typeof n)return l(new Error("marked(): input parameter is of type "+Object.prototype.toString.call(n)+", string expected"));if(i.hooks&&(i.hooks.options=i),i.async)return Promise.resolve(i.hooks?i.hooks.preprocess(n):n).then((t=>e(t,i))).then((e=>i.hooks?i.hooks.processAllTokens(e):e)).then((e=>i.walkTokens?Promise.all(this.walkTokens(e,i.walkTokens)).then((()=>e)):e)).then((e=>t(e,i))).then((e=>i.hooks?i.hooks.postprocess(e):e)).catch(l);try{i.hooks&&(n=i.hooks.preprocess(n));let s=e(n,i);i.hooks&&(s=i.hooks.processAllTokens(s)),i.walkTokens&&this.walkTokens(s,i.walkTokens);let r=t(s,i);return i.hooks&&(r=i.hooks.postprocess(r)),r}catch(e){return l(e)}}}#t(e,t){return n=>{if(n.message+="\nPlease report this to https://github.com/markedjs/marked.",e){const e="

    An error occurred:

    "+c(n.message+"",!0)+"
    ";return t?Promise.resolve(e):e}if(t)return Promise.reject(n);throw n}}}const ae=new oe;function ce(e,t){return ae.parse(e,t)}ce.options=ce.setOptions=function(e){return ae.setOptions(e),ce.defaults=ae.defaults,n(ce.defaults),ce},ce.getDefaults=t,ce.defaults=e.defaults,ce.use=function(...e){return ae.use(...e),ce.defaults=ae.defaults,n(ce.defaults),ce},ce.walkTokens=function(e,t){return ae.walkTokens(e,t)},ce.parseInline=ae.parseInline,ce.Parser=ie,ce.parser=ie.parse,ce.Renderer=se,ce.TextRenderer=re,ce.Lexer=ne,ce.lexer=ne.lex,ce.Tokenizer=w,ce.Hooks=le,ce.parse=ce;const he=ce.options,pe=ce.setOptions,ue=ce.use,ke=ce.walkTokens,ge=ce.parseInline,fe=ce,de=ie.parse,xe=ne.lex;e.Hooks=le,e.Lexer=ne,e.Marked=oe,e.Parser=ie,e.Renderer=se,e.TextRenderer=re,e.Tokenizer=w,e.getDefaults=t,e.lexer=xe,e.marked=ce,e.options=he,e.parse=fe,e.parseInline=ge,e.parser=de,e.setOptions=pe,e.use=ue,e.walkTokens=ke})); diff --git a/custom_nodes/ComfyUI-KJNodes-main/kjweb_async/protovis.min.js b/custom_nodes/ComfyUI-KJNodes-main/kjweb_async/protovis.min.js new file mode 100644 index 0000000000000000000000000000000000000000..dfb84166521a49e4f7e41539933b101e126bd72f --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/kjweb_async/protovis.min.js @@ -0,0 +1,277 @@ +var a;if(!Array.prototype.map)Array.prototype.map=function(b,c){for(var d=this.length,f=new Array(d),g=0;g>>0,f=0;f=d)throw new Error("reduce: no values, no initial value");}for(;f=0&&d=69&&m<100?1900:0)});return"([0-9]+)";case "%Y":q.push(function(m){g=m});return"([0-9]+)";case "%%":q.push(function(){}); +return"%"}return p});(f=f.match(n))&&f.forEach(function(p,m){q[m](p)});return new Date(g,h,i,j,k,l)};return c}; +pv.Format.time=function(b){function c(f){f=Number(f);switch(b){case "short":if(f>=31536E6)return(f/31536E6).toFixed(1)+" years";else if(f>=6048E5)return(f/6048E5).toFixed(1)+" weeks";else if(f>=864E5)return(f/864E5).toFixed(1)+" days";else if(f>=36E5)return(f/36E5).toFixed(1)+" hours";else if(f>=6E4)return(f/6E4).toFixed(1)+" minutes";return(f/1E3).toFixed(1)+" seconds";case "long":var g=[],h=f%36E5/6E4>>0;g.push(d("0",2,f%6E4/1E3>>0));if(f>=36E5){var i=f%864E5/36E5>>0;g.push(d("0",2,h));if(f>=864E5){g.push(d("0", +2,i));g.push(Math.floor(f/864E5).toFixed())}else g.push(i.toFixed())}else g.push(h.toFixed());return g.reverse().join(":")}}var d=pv.Format.pad;c.format=c;c.parse=function(f){switch(b){case "short":for(var g=/([0-9,.]+)\s*([a-z]+)/g,h,i=0;h=g.exec(f);){var j=parseFloat(h[0].replace(",","")),k=0;switch(h[2].toLowerCase()){case "year":case "years":k=31536E6;break;case "week":case "weeks":k=6048E5;break;case "day":case "days":k=864E5;break;case "hour":case "hours":k=36E5;break;case "minute":case "minutes":k= +6E4;break;case "second":case "seconds":k=1E3;break}i+=j*k}return i;case "long":h=f.replace(",","").split(":").reverse();i=0;if(h.length)i+=parseFloat(h[0])*1E3;if(h.length>1)i+=parseFloat(h[1])*6E4;if(h.length>2)i+=parseFloat(h[2])*36E5;if(h.length>3)i+=parseFloat(h[3])*864E5;return i}};return c}; +pv.Format.number=function(){function b(r){if(Infinity>h)r=Math.round(r*i)/i;var s=String(Math.abs(r)).split("."),t=s[0];if(t.length>d)t=t.substring(t.length-d);if(l&&t.length3)t=t.replace(/\B(?=(?:\d{3})+(?!\d))/g,n);if(!l&&t.lengthd)s=s.substring(s.length-d);r=r[1]?Number("0."+r[1]):0;if(Infinity>h)r=Math.round(r*i)/i;return Math.round(s)+r};b.integerDigits=function(r,s){if(arguments.length){c=Number(r);d=arguments.length>1?Number(s):c;f=c+Math.floor(c/3)*n.length;return 
this}return[c,d]};b.fractionDigits=function(r,s){if(arguments.length){g= +Number(r);h=arguments.length>1?Number(s):g;i=Math.pow(10,h);return this}return[g,h]};b.integerPad=function(r){if(arguments.length){j=String(r);l=/\d/.test(j);return this}return j};b.fractionPad=function(r){if(arguments.length){k=String(r);return this}return k};b.decimal=function(r){if(arguments.length){q=String(r);return this}return q};b.group=function(r){if(arguments.length){n=r?String(r):"";f=c+Math.floor(c/3)*n.length;return this}return n};b.negativeAffix=function(r,s){if(arguments.length){p=String(r|| +"");m=String(s||"");return this}return[p,m]};return b};pv.map=function(b,c){var d={};return c?b.map(function(f,g){d.index=g;return c.call(d,f)}):b.slice()};pv.repeat=function(b,c){if(arguments.length==1)c=2;return pv.blend(pv.range(c).map(function(){return b}))};pv.cross=function(b,c){for(var d=[],f=0,g=b.length,h=c.length;fc){b.length=d;for(var f=c;fc?1:0}; +pv.reverseOrder=function(b,c){return cb?1:0};pv.search=function(b,c,d){if(!d)d=pv.identity;for(var f=0,g=b.length-1;f<=g;){var h=f+g>>1,i=d(b[h]);if(ic)g=h-1;else return h}return-f-1};pv.search.index=function(b,c,d){b=pv.search(b,c,d);return b<0?-b-1:b}; +pv.range=function(b,c,d){if(arguments.length==1){c=b;b=0}if(d==undefined)d=1;if((c-b)/d==Infinity)throw new Error("range must be finite");var f=[],g=0,h;c-=(c-b)*1.0E-10;if(d<0)for(;(h=b+d*g++)>c;)f.push(h);else for(;(h=b+d*g++)f){f=i;d=h}}return d}; +pv.min=function(b,c){if(c==pv.index)return 0;return Math.min.apply(null,c?pv.map(b,c):b)};pv.min.index=function(b,c){if(!b.length)return-1;if(c==pv.index)return 0;if(!c)c=pv.identity;for(var d=0,f=Infinity,g={},h=0;h0?Math.pow(c,Math.floor(pv.log(b,c))):-Math.pow(c,-Math.floor(-pv.log(-b,c)))};pv.logCeil=function(b,c){return b>0?Math.pow(c,Math.ceil(pv.log(b,c))):-Math.pow(c,-Math.ceil(-pv.log(-b,c)))}; +(function(){var b=Math.PI/180,c=180/Math.PI;pv.radians=function(d){return b*d};pv.degrees=function(d){return c*d}})();pv.keys=function(b){var c=[];for(var d in b)c.push(d);return c};pv.entries=function(b){var c=[];for(var d in b)c.push({key:d,value:b[d]});return c};pv.values=function(b){var c=[];for(var d in b)c.push(b[d]);return c};pv.dict=function(b,c){for(var d={},f={},g=0;g=94608E6){p=31536E6;u="%Y";o=function(w){w.setFullYear(w.getFullYear()+v)}}else if(t>=7776E6){p=2592E6;u="%m/%Y";o=function(w){w.setMonth(w.getMonth()+v)}}else if(t>=18144E5){p=6048E5;u="%m/%d";o=function(w){w.setDate(w.getDate()+7*v)}}else if(t>=2592E5){p=864E5;u="%m/%d";o=function(w){w.setDate(w.getDate()+v)}}else if(t>=108E5){p=36E5;u="%I:%M %p";o=function(w){w.setHours(w.getHours()+ +v)}}else if(t>=18E4){p=6E4;u="%I:%M %p";o=function(w){w.setMinutes(w.getMinutes()+v)}}else if(t>=3E3){p=1E3;u="%I:%M:%S";o=function(w){w.setSeconds(w.getSeconds()+v)}}else{p=1;u="%S.%Qs";o=function(w){w.setTime(w.getTime()+v)}}q=pv.Format.date(u);s=new Date(s);u=[];x(s,p);t=t/p;if(t>10)switch(p){case 36E5:v=t>20?6:3;s.setHours(Math.floor(s.getHours()/v)*v);break;case 2592E6:v=3;s.setMonth(Math.floor(s.getMonth()/v)*v);break;case 6E4:v=t>30?15:t>15?10:5;s.setMinutes(Math.floor(s.getMinutes()/v)*v); +break;case 1E3:v=t>90?15:t>60?10:5;s.setSeconds(Math.floor(s.getSeconds()/v)*v);break;case 1:v=t>1E3?250:t>200?100:t>100?50:t>50?25:5;s.setMilliseconds(Math.floor(s.getMilliseconds()/v)*v);break;default:v=pv.logCeil(t/15,10);if(t/v<2)v/=5;else if(t/v<5)v/=2;s.setFullYear(Math.floor(s.getFullYear()/v)*v);break}for(;;){o(s);if(s>m)break;u.push(new Date(s))}return 
r?u.reverse():u}arguments.length||(n=10);v=pv.logFloor(t/n,10);p=n/(t/v);if(p<=0.15)v*=10;else if(p<=0.35)v*=5;else if(p<=0.75)v*=2;p=Math.ceil(s/ +v)*v;m=Math.floor(m/v)*v;q=pv.Format.number().fractionDigits(Math.max(0,-Math.floor(pv.log(v,10)+0.01)));m=pv.range(p,m+v,v);return r?m.reverse():m};c.tickFormat=function(n){return q(n)};c.nice=function(){if(d.length!=2)return this;var n=d[0],p=d[d.length-1],m=p0;i--)l.push(-g(-j)*i);else{for(;jh[1];k--);return l.slice(j,k)};b.tickFormat=function(h){return h.toPrecision(1)}; +b.nice=function(){var h=b.domain();return b.domain(pv.logFloor(h[0],c),pv.logCeil(h[1],c))};b.base=function(h){if(arguments.length){c=Number(h);d=Math.log(c);b.transform(f,g);return this}return c};b.domain.apply(b,arguments);return b.base(10)};pv.Scale.root=function(){var b=pv.Scale.quantitative();b.power=function(c){if(arguments.length){var d=Number(c),f=1/d;b.transform(function(g){return Math.pow(g,f)},function(g){return Math.pow(g,d)});return this}return d};b.domain.apply(b,arguments);return b.power(2)}; +pv.Scale.ordinal=function(){function b(g){g in d||(d[g]=c.push(g)-1);return f[d[g]%f.length]}var c=[],d={},f=[];b.domain=function(g,h){if(arguments.length){g=g instanceof Array?arguments.length>1?pv.map(g,h):g:Array.prototype.slice.call(arguments);c=[];for(var i={},j=0;j1?pv.map(g,h):g:Array.prototype.slice.call(arguments); +if(typeof f[0]=="string")f=f.map(pv.color);return this}return f};b.split=function(g,h){var i=(h-g)/this.domain().length;f=pv.range(g+i/2,h,i);return this};b.splitFlush=function(g,h){var i=this.domain().length,j=(h-g)/(i-1);f=i==1?[(g+h)/2]:pv.range(g,h+j/2,j);return this};b.splitBanded=function(g,h,i){if(arguments.length<3)i=1;if(i<0){var j=this.domain().length;j=(h-g- -i*j)/(j+1);f=pv.range(g+j,h,j-i);f.band=-i}else{j=(h-g)/(this.domain().length+(1-i));f=pv.range(g+j*(1-i),h,j);f.band=j*i}return this}; +b.by=function(g){function h(){return b(g.apply(this,arguments))}for(var i in b)h[i]=b[i];return h};b.domain.apply(b,arguments);return b}; +pv.Scale.quantile=function(){function b(i){return h(Math.max(0,Math.min(d,pv.search.index(f,i)-1))/d)}var c=-1,d=-1,f=[],g=[],h=pv.Scale.linear();b.quantiles=function(i){if(arguments.length){c=Number(i);if(c<0){f=[g[0]].concat(g);d=g.length-1}else{f=[];f[0]=g[0];for(var j=1;j<=c;j++)f[j]=g[~~(j*(g.length-1)/c)];d=c-1}return this}return f};b.domain=function(i,j){if(arguments.length){g=i instanceof Array?pv.map(i,j):Array.prototype.slice.call(arguments);g.sort(pv.naturalOrder);b.quantiles(c);return this}return g}; +b.range=function(){if(arguments.length){h.range.apply(h,arguments);return this}return h.range()};b.by=function(i){function j(){return b(i.apply(this,arguments))}for(var k in b)j[k]=b[k];return j};b.domain.apply(b,arguments);return b}; +pv.histogram=function(b,c){var d=true;return{bins:function(f){var g=pv.map(b,c),h=[];arguments.length||(f=pv.Scale.linear(g).ticks());for(var i=0;i360)j-=360;else if(j<0)j+=360;if(j<60)return i+(h-i)*j/60;if(j<180)return h;if(j<240)return i+(h-i)*(240-j)/60;return i}function c(j){return Math.round(b(j)*255)}var d=this.h,f=this.s,g=this.l;d%=360;if(d<0)d+=360;f=Math.max(0,Math.min(f,1));g=Math.max(0,Math.min(g,1));var h=g<=0.5?g*(1+f):g+f-g*f,i=2*g-h;return pv.rgb(c(d+120),c(d),c(d-120),this.a)}; 
+pv.Color.names={aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyan:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400", +darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc", +ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavender:"#e6e6fa",lavenderblush:"#fff0f5",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a", +lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnightblue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1", +moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57", +seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",steelblue:"#4682b4",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",tomato:"#ff6347",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32",transparent:pv.Color.transparent=pv.rgb(0,0,0,0)};(function(){var b=pv.Color.names;for(var c in b)b[c]=pv.color(b[c])})(); +pv.colors=function(){var b=pv.Scale.ordinal();b.range.apply(b,arguments);return b};pv.Colors={};pv.Colors.category10=function(){var b=pv.colors("#1f77b4","#ff7f0e","#2ca02c","#d62728","#9467bd","#8c564b","#e377c2","#7f7f7f","#bcbd22","#17becf");b.domain.apply(b,arguments);return b}; +pv.Colors.category20=function(){var 
b=pv.colors("#1f77b4","#aec7e8","#ff7f0e","#ffbb78","#2ca02c","#98df8a","#d62728","#ff9896","#9467bd","#c5b0d5","#8c564b","#c49c94","#e377c2","#f7b6d2","#7f7f7f","#c7c7c7","#bcbd22","#dbdb8d","#17becf","#9edae5");b.domain.apply(b,arguments);return b}; +pv.Colors.category19=function(){var b=pv.colors("#9c9ede","#7375b5","#4a5584","#cedb9c","#b5cf6b","#8ca252","#637939","#e7cb94","#e7ba52","#bd9e39","#8c6d31","#e7969c","#d6616b","#ad494a","#843c39","#de9ed6","#ce6dbd","#a55194","#7b4173");b.domain.apply(b,arguments);return b};pv.ramp=function(){var b=pv.Scale.linear();b.range.apply(b,arguments);return b}; +pv.Scene=pv.SvgScene={svg:"http://www.w3.org/2000/svg",xmlns:"http://www.w3.org/2000/xmlns",xlink:"http://www.w3.org/1999/xlink",xhtml:"http://www.w3.org/1999/xhtml",scale:1,events:["DOMMouseScroll","mousewheel","mousedown","mouseup","mouseover","mouseout","mousemove","click","dblclick"],implicit:{svg:{"shape-rendering":"auto","pointer-events":"painted",x:0,y:0,dy:0,"text-anchor":"start",transform:"translate(0,0)",fill:"none","fill-opacity":1,stroke:"none","stroke-opacity":1,"stroke-width":1.5,"stroke-linejoin":"miter"}, +css:{font:"10px sans-serif"}}};pv.SvgScene.updateAll=function(b){if(b.length&&b[0].reverse&&b.type!="line"&&b.type!="area"){for(var c=pv.extend(b),d=0,f=b.length-1;f>=0;d++,f--)c[d]=b[f];b=c}this.removeSiblings(this[b.type](b))};pv.SvgScene.create=function(b){return document.createElementNS(this.svg,b)}; +pv.SvgScene.expect=function(b,c,d,f){if(b){if(b.tagName=="a")b=b.firstChild;if(b.tagName!=c){c=this.create(c);b.parentNode.replaceChild(c,b);b=c}}else b=this.create(c);for(var g in d){c=d[g];if(c==this.implicit.svg[g])c=null;c==null?b.removeAttribute(g):b.setAttribute(g,c)}for(g in f){c=f[g];if(c==this.implicit.css[g])c=null;if(c==null)b.style.removeProperty(g);else b.style[g]=c}return b}; +pv.SvgScene.append=function(b,c,d){b.$scene={scenes:c,index:d};b=this.title(b,c[d]);b.parentNode||c.$g.appendChild(b);return b.nextSibling};pv.SvgScene.title=function(b,c){var d=b.parentNode;if(d&&d.tagName!="a")d=null;if(c.title){if(!d){d=this.create("a");b.parentNode&&b.parentNode.replaceChild(d,b);d.appendChild(b)}d.setAttributeNS(this.xlink,"title",c.title);return d}d&&d.parentNode.replaceChild(b,d);return b}; +pv.SvgScene.dispatch=pv.listener(function(b){var c=b.target.$scene;if(c){var d=b.type;switch(d){case "DOMMouseScroll":d="mousewheel";b.wheel=-480*b.detail;break;case "mousewheel":b.wheel=(window.opera?12:1)*b.wheelDelta;break}pv.Mark.dispatch(d,c.scenes,c.index)&&b.preventDefault()}});pv.SvgScene.removeSiblings=function(b){for(;b;){var c=b.nextSibling;b.parentNode.removeChild(b);b=c}};pv.SvgScene.undefined=function(){}; +pv.SvgScene.pathBasis=function(){function b(f,g,h,i,j){return{x:f[0]*g.left+f[1]*h.left+f[2]*i.left+f[3]*j.left,y:f[0]*g.top+f[1]*h.top+f[2]*i.top+f[3]*j.top}}var c=[[1/6,2/3,1/6,0],[0,2/3,1/3,0],[0,1/3,2/3,0],[0,1/6,2/3,1/6]],d=function(f,g,h,i){var j=b(c[1],f,g,h,i),k=b(c[2],f,g,h,i);f=b(c[3],f,g,h,i);return"C"+j.x+","+j.y+","+k.x+","+k.y+","+f.x+","+f.y};d.segment=function(f,g,h,i){var j=b(c[0],f,g,h,i),k=b(c[1],f,g,h,i),l=b(c[2],f,g,h,i);f=b(c[3],f,g,h,i);return"M"+j.x+","+j.y+"C"+k.x+","+k.y+ +","+l.x+","+l.y+","+f.x+","+f.y};return d}();pv.SvgScene.curveBasis=function(b){if(b.length<=2)return"";var c="",d=b[0],f=d,g=d,h=b[1];c+=this.pathBasis(d,f,g,h);for(var i=2;i1){j=c[1];h=b[k];k++;f+="C"+(g.left+i.x)+","+(g.top+i.y)+","+(h.left-j.x)+","+(h.top-j.y)+","+h.left+","+h.top;for(g=2;g9){k=3/Math.sqrt(k);f[h]= 
+k*i*d[h];f[h+1]=k*j*d[h]}}for(h=0;h2&&(g.interpolate=="basis"||g.interpolate=="cardinal"||g.interpolate=="monotone")?d:c)(l,q-1));l=q-1}}if(!j.length)return f;f=this.expect(f,"path",{"shape-rendering":g.antialias?null:"crispEdges","pointer-events":g.events,cursor:g.cursor,d:"M"+j.join("ZM")+"Z",fill:h.color,"fill-opacity":h.opacity|| +null,stroke:i.color,"stroke-opacity":i.opacity||null,"stroke-width":i.opacity?g.lineWidth/this.scale:null});return this.append(f,b,0)}; +pv.SvgScene.areaSegment=function(b){var c=b.$g.firstChild,d=b[0],f,g;if(d.interpolate=="basis"||d.interpolate=="cardinal"||d.interpolate=="monotone"){f=[];g=[];for(var h=0,i=b.length;h2&&(d.interpolate=="basis"||d.interpolate=="cardinal"||d.interpolate=="monotone"))switch(d.interpolate){case "basis":h+=this.curveBasis(b);break;case "cardinal":h+=this.curveCardinal(b,d.tension);break;case "monotone":h+=this.curveMonotone(b); +break}else for(var i=1;i1)break;return"A"+f+","+f+" 0 0,"+d+" "+c.left+","+c.top;case "step-before":return"V"+c.top+"H"+c.left;case "step-after":return"H"+c.left+"V"+c.top}return"L"+c.left+","+c.top};pv.SvgScene.lineIntersect=function(b,c,d,f){return b.plus(c.times(d.minus(b).dot(f.perp())/c.dot(f.perp())))}; +pv.SvgScene.pathJoin=function(b,c,d,f){var g=pv.vector(c.left,c.top);d=pv.vector(d.left,d.top);var h=d.minus(g),i=h.perp().norm(),j=i.times(c.lineWidth/(2*this.scale));c=g.plus(j);var k=d.plus(j),l=d.minus(j);j=g.minus(j);if(b&&b.visible){b=g.minus(b.left,b.top).perp().norm().plus(i);j=this.lineIntersect(g,b,j,h);c=this.lineIntersect(g,b,c,h)}if(f&&f.visible){f=pv.vector(f.left,f.top).minus(d).perp().norm().plus(i);l=this.lineIntersect(d,f,l,h);k=this.lineIntersect(d,f,k,h)}return"M"+c.x+","+c.y+ +"L"+k.x+","+k.y+" "+l.x+","+l.y+" "+j.x+","+j.y}; +pv.SvgScene.panel=function(b){for(var c=b.$g,d=c&&c.firstChild,f=0;f=2*Math.PI)i=i?"M0,"+j+"A"+j+","+j+" 0 1,1 0,"+-j+"A"+j+","+j+" 0 1,1 0,"+j+"M0,"+i+"A"+i+","+i+" 0 1,1 0,"+-i+"A"+i+","+i+" 0 1,1 0,"+i+"Z":"M0,"+j+"A"+j+","+j+" 0 1,1 0,"+-j+"A"+j+","+j+" 0 1,1 0,"+j+"Z";else{var l=Math.min(f.startAngle,f.endAngle),q=Math.max(f.startAngle,f.endAngle), +n=Math.cos(l),p=Math.cos(q);l=Math.sin(l);q=Math.sin(q);i=i?"M"+j*n+","+j*l+"A"+j+","+j+" 0 "+(k1?c:null)}; +a.anchor=function(b){b||(b="center");return(new pv.Anchor(this)).name(b).data(function(){return this.scene.target.map(function(c){return c.data})}).visible(function(){return this.scene.target[this.index].visible}).left(function(){var c=this.scene.target[this.index],d=c.width||0;switch(this.name()){case "bottom":case "top":case "center":return c.left+d/2;case "left":return null}return c.left+d}).top(function(){var c=this.scene.target[this.index],d=c.height||0;switch(this.name()){case "left":case "right":case "center":return c.top+ +d/2;case "top":return null}return c.top+d}).right(function(){var c=this.scene.target[this.index];return this.name()=="left"?c.right+(c.width||0):null}).bottom(function(){var c=this.scene.target[this.index];return this.name()=="top"?c.bottom+(c.height||0):null}).textAlign(function(){switch(this.name()){case "bottom":case "top":case "center":return"center";case "right":return"right"}return"left"}).textBaseline(function(){switch(this.name()){case "right":case "left":case "center":return"middle";case "top":return"top"}return"bottom"})}; +a.anchorTarget=function(){return this.target};a.margin=function(b){return this.left(b).right(b).top(b).bottom(b)};a.instance=function(b){var 
c=this.scene||this.parent.instance(-1).children[this.childIndex],d=!arguments.length||this.hasOwnProperty("index")?this.index:b;return c[d<0?c.length-1:d]}; +a.instances=function(b){for(var c=this,d=[],f;!(f=c.scene);){b=b.parent;d.push({index:b.index,childIndex:c.childIndex});c=c.parent}for(;d.length;){b=d.pop();f=f[b.index].children[b.childIndex]}if(this.hasOwnProperty("index")){d=pv.extend(f[this.index]);d.right=d.top=d.left=d.bottom=0;return[d]}return f};a.first=function(){return this.scene[0]};a.last=function(){return this.scene[this.scene.length-1]};a.sibling=function(){return this.index==0?null:this.scene[this.index-1]}; +a.cousin=function(){var b=this.parent;return(b=b&&b.sibling())&&b.children?b.children[this.childIndex][this.index]:null}; +a.render=function(){function b(i,j,k){i.scale=k;if(j=0;l--){var q=k[l];if(!(q.name in c)){c[q.name]=q;switch(q.name){case "data":f=q;break;case "visible":g=q;break;default:d[q.type].push(q);break}}}while(j=j.proto)}var c={},d=[[],[],[],[]],f,g;b(this);b(this.defaults);d[1].reverse();d[3].reverse();var h=this;do for(var i in h.properties)i in c||d[2].push(c[i]={name:i,type:2,value:null});while(h=h.proto);h=d[0].concat(d[1]);for(i=0;ih.id)d[g.name]={id:0,value:g.type&1?g.value.apply(this,c):g.value}}}d=this.binds.data;d=d.type& +1?d.value.apply(this,c):d.value;c.unshift(null);b.length=d.length;for(f=0;f0;l--){p=m[l];p.scale=q;q*=p.scene[p.index].transform.k}if(n.children){l=0;for(m=n.children.length;l=3*Math.PI/2};pv.Wedge.prototype.buildImplied=function(b){if(b.angle==null)b.angle=b.endAngle-b.startAngle;else if(b.endAngle==null)b.endAngle=b.startAngle+b.angle;pv.Mark.prototype.buildImplied.call(this,b)};pv.simulation=function(b){return new pv.Simulation(b)};pv.Simulation=function(b){for(var c=0;c=s,u=q.y>=t;l.leaf=false;switch((u<<1)+x){case 0:l=l.c1||(l.c1=new pv.Quadtree.Node);break;case 1:l=l.c2||(l.c2=new pv.Quadtree.Node);break;case 2:l=l.c3||(l.c3=new pv.Quadtree.Node);break;case 3:l=l.c4||(l.c4=new pv.Quadtree.Node); +break}if(x)n=s;else m=s;if(u)p=t;else r=t;c(l,q,n,p,m,r)}var f,g=Number.POSITIVE_INFINITY,h=g,i=Number.NEGATIVE_INFINITY,j=i;for(f=b;f;f=f.next){if(f.xi)i=f.x;if(f.y>j)j=f.y}f=i-g;var k=j-h;if(f>k)j=h+f;else i=g+k;this.xMin=g;this.yMin=h;this.xMax=i;this.yMax=j;this.root=new pv.Quadtree.Node;for(f=b;f;f=f.next)c(this.root,f,g,h,i,j)};pv.Quadtree.Node=function(){this.leaf=true;this.p=this.c4=this.c3=this.c2=this.c1=null};pv.Force={}; +pv.Force.charge=function(b){function c(l){function q(m){c(m);l.cn+=m.cn;n+=m.cn*m.cx;p+=m.cn*m.cy}var n=0,p=0;l.cn=0;if(!l.leaf){l.c1&&q(l.c1);l.c2&&q(l.c2);l.c3&&q(l.c3);l.c4&&q(l.c4)}if(l.p){l.cn+=b;n+=b*l.p.x;p+=b*l.p.y}l.cx=n/l.cn;l.cy=p/l.cn}function d(l,q,n,p,m,r){var s=l.cx-q.x,t=l.cy-q.y,x=1/Math.sqrt(s*s+t*t);if(l.leaf&&l.p!=q||(m-n)*xg)x=g;l=l.cn*x*x*x;s=s*l;t=t*l;q.fx+=s;q.fy+=t}}else if(!l.leaf){var u=(n+m)*0.5,o=(p+r)*0.5;l.c1&&d(l.c1,q,n,p,u,o);l.c2&&d(l.c2,q,u,p, +m,o);l.c3&&d(l.c3,q,n,o,u,r);l.c4&&d(l.c4,q,u,o,m,r);if(!(xg)x=g;if(l.p&&l.p!=q){l=b*x*x*x;s=s*l;t=t*l;q.fx+=s;q.fy+=t}}}}var f=2,g=1/f,h=500,i=1/h,j=0.9,k={};arguments.length||(b=-40);k.constant=function(l){if(arguments.length){b=Number(l);return k}return b};k.domain=function(l,q){if(arguments.length){f=Number(l);g=1/f;h=Number(q);i=1/h;return k}return[f,h]};k.theta=function(l){if(arguments.length){j=Number(l);return k}return j};k.apply=function(l,q){c(q.root);for(l=l;l;l=l.next)d(q.root, +l,q.xMin,q.yMin,q.xMax,q.yMax)};return k};pv.Force.drag=function(b){var 
c={};arguments.length||(b=0.1);c.constant=function(d){if(arguments.length){b=d;return c}return b};c.apply=function(d){if(b)for(d=d;d;d=d.next){d.fx-=b*d.vx;d.fy-=b*d.vy}};return c}; +pv.Force.spring=function(b){var c=0.1,d=20,f,g,h={};arguments.length||(b=0.1);h.links=function(i){if(arguments.length){f=i;g=i.map(function(j){return 1/Math.sqrt(Math.max(j.sourceNode.linkDegree,j.targetNode.linkDegree))});return h}return f};h.constant=function(i){if(arguments.length){b=Number(i);return h}return b};h.damping=function(i){if(arguments.length){c=Number(i);return h}return c};h.length=function(i){if(arguments.length){d=Number(i);return h}return d};h.apply=function(){for(var i=0;ig,o=sh){l.c1&&u&&c(l.c1,q,n,p,s,t);l.c2&&o&&c(l.c2,q,s,p,m,t)}if(x){l.c3&&u&&c(l.c3,q,n,t,s,r);l.c4&&o&&c(l.c4,q,s,t,m,r)}}if(l.p&&l.p!=q){n=q.x-l.p.x;p=q.y-l.p.y;m=Math.sqrt(n*n+p*p);r=f+b(l.p);if(mm)m=p}for(var r=0;rc.max?c.max:g.x;if(d)for(g=f;g;g=g.next)g.y=g.yd.max?d.max:g.y};return b};pv.Layout=function(){pv.Panel.call(this)};pv.Layout.prototype=pv.extend(pv.Panel); +pv.Layout.prototype.property=function(b,c){if(!this.hasOwnProperty("properties"))this.properties=pv.extend(this.properties);this.properties[b]=true;this.propertyMethod(b,false,pv.Mark.cast[b]=c);return this}; +pv.Layout.Network=function(){pv.Layout.call(this);var b=this;this.$id=pv.id();(this.node=(new pv.Mark).data(function(){return b.nodes()}).strokeStyle("#1f77b4").fillStyle("#fff").left(function(c){return c.x}).top(function(c){return c.y})).parent=this;this.link=(new pv.Mark).extend(this.node).data(function(c){return[c.sourceNode,c.targetNode]}).fillStyle(null).lineWidth(function(c,d){return d.linkValue*1.5}).strokeStyle("rgba(0,0,0,.2)");this.link.add=function(c){return b.add(pv.Panel).data(function(){return b.links()}).add(c).extend(this)}; +(this.label=(new pv.Mark).extend(this.node).textMargin(7).textBaseline("middle").text(function(c){return c.nodeName||c.nodeValue}).textAngle(function(c){c=c.midAngle;return pv.Wedge.upright(c)?c:c+Math.PI}).textAlign(function(c){return pv.Wedge.upright(c.midAngle)?"left":"right"})).parent=this}; +pv.Layout.Network.prototype=pv.extend(pv.Layout).property("nodes",function(b){return b.map(function(c,d){if(typeof c!="object")c={nodeValue:c};c.index=d;return c})}).property("links",function(b){return b.map(function(c){if(isNaN(c.linkValue))c.linkValue=isNaN(c.value)?1:c.value;return c})});pv.Layout.Network.prototype.reset=function(){this.$id=pv.id();return this};pv.Layout.Network.prototype.buildProperties=function(b,c){if((b.$id||0)=this.$id)return true;b.$id=this.$id;b.nodes.forEach(function(c){c.linkDegree=0});b.links.forEach(function(c){var d=c.linkValue;(c.sourceNode||(c.sourceNode=b.nodes[c.source])).linkDegree+=d;(c.targetNode||(c.targetNode=b.nodes[c.target])).linkDegree+=d})};pv.Layout.Hierarchy=function(){pv.Layout.Network.call(this);this.link.strokeStyle("#ccc")};pv.Layout.Hierarchy.prototype=pv.extend(pv.Layout.Network); +pv.Layout.Hierarchy.prototype.buildImplied=function(b){if(!b.links)b.links=pv.Layout.Hierarchy.links.call(this);pv.Layout.Network.prototype.buildImplied.call(this,b)};pv.Layout.Hierarchy.links=function(){return this.nodes().filter(function(b){return b.parentNode}).map(function(b){return{sourceNode:b,targetNode:b.parentNode,linkValue:1}})}; +pv.Layout.Hierarchy.NodeLink={buildImplied:function(b){function c(m){return m.parentNode?m.depth*(n-q)+q:0}function d(m){return m.parentNode?(m.breadth-0.25)*2*Math.PI:0}function f(m){switch(i){case "left":return m.depth*k;case "right":return 
k-m.depth*k;case "top":return m.breadth*k;case "bottom":return k-m.breadth*k;case "radial":return k/2+c(m)*Math.cos(m.midAngle)}}function g(m){switch(i){case "left":return m.breadth*l;case "right":return l-m.breadth*l;case "top":return m.depth*l;case "bottom":return l- +m.depth*l;case "radial":return l/2+c(m)*Math.sin(m.midAngle)}}var h=b.nodes,i=b.orient,j=/^(top|bottom)$/.test(i),k=b.width,l=b.height;if(i=="radial"){var q=b.innerRadius,n=b.outerRadius;if(q==null)q=0;if(n==null)n=Math.min(k,l)/2}for(b=0;bb.dy?0:-Math.PI/2});(this.leaf=(new pv.Mark).extend(this.node).fillStyle(null).strokeStyle(null).visible(function(b){return!b.firstChild})).parent= +this;delete this.link};pv.Layout.Treemap.prototype=pv.extend(pv.Layout.Hierarchy).property("round",Boolean).property("paddingLeft",Number).property("paddingRight",Number).property("paddingTop",Number).property("paddingBottom",Number).property("mode",String).property("order",String);a=pv.Layout.Treemap.prototype;a.defaults=(new pv.Layout.Treemap).extend(pv.Layout.Hierarchy.prototype.defaults).mode("squarify").order("ascending");a.padding=function(b){return this.paddingLeft(b).paddingRight(b).paddingTop(b).paddingBottom(b)}; +a.$size=function(b){return Number(b.nodeValue)};a.size=function(b){this.$size=pv.functor(b);return this}; +a.buildImplied=function(b){function c(r,s,t,x,u,o,v){for(var w=0,y=0;wt)t=v;u+=v}u*=u;s*=s;return Math.max(s*t/u,u/(s*x))}function f(r,s){function t(A){var D=o==y,G=pv.sum(A,n),E=y?p(G/y):0;c(A,G,D,x,u,D?o:E,D?E:v);if(D){u+=E;v-=E}else{x+= +E;o-=E}y=Math.min(o,v);return D}var x=r.x+j,u=r.y+l,o=r.dx-j-k,v=r.dy-l-q;if(m!="squarify")c(r.childNodes,r.size,m=="slice"?true:m=="dice"?false:s&1,x,u,o,v);else{var w=[];s=Infinity;var y=Math.min(o,v),z=o*v/r.size;if(!(r.size<=0)){r.visitBefore(function(A){A.size*=z});for(r=r.childNodes.slice();r.length;){var C=r[r.length-1];if(C.size){w.push(C);z=d(w,y);if(z<=s){r.pop();s=z}else{w.pop();t(w);w.length=0;s=Infinity}}else r.pop()}if(t(w))for(s=0;s0){i(k(C,o,v),o,B);A+=B;D+=B}G+=C.mod;A+=y.mod;E+=w.mod;D+=z.mod;C=h(C);y=g(y)}if(C&&!h(z)){z.thread=C;z.mod+=G-D}if(y&&!g(w)){w.thread=y;w.mod+=A-E;v=o}}return v}function g(o){return o.firstChild||o.thread}function h(o){return o.lastChild||o.thread}function i(o,v,w){var y=v.number-o.number;v.change-=w/y;v.shift+=w;o.change+= +w/y;v.prelim+=w;v.mod+=w}function j(o){var v=0,w=0;for(o=o.lastChild;o;o=o.previousSibling){o.prelim+=v;o.mod+=v;w+=o.change;v+=o.shift+w}}function k(o,v,w){return o.ancestor.parentNode==v.parentNode?o.ancestor:w}function l(o,v){return(v?1:t+1)/(m=="radial"?o:1)}function q(o){return m=="radial"?o.breadth/r:0}function n(o){switch(m){case "left":return o.depth;case "right":return x-o.depth;case "top":case "bottom":return o.breadth+x/2;case "radial":return x/2+o.depth*Math.cos(q(o))}}function p(o){switch(m){case "left":case "right":return o.breadth+ +u/2;case "top":return o.depth;case "bottom":return u-o.depth;case "radial":return u/2+o.depth*Math.sin(q(o))}}if(!pv.Layout.Hierarchy.prototype.buildImplied.call(this,b)){var m=b.orient,r=b.depth,s=b.breadth,t=b.group,x=b.width,u=b.height;b=b.nodes[0];b.visitAfter(function(o,v){o.ancestor=o;o.prelim=0;o.mod=0;o.change=0;o.shift=0;o.number=o.previousSibling?o.previousSibling.number+1:0;o.depth=v});c(b);d(b,-b.prelim,0);b.visitAfter(function(o){o.breadth*=s;o.depth*=r;o.midAngle=q(o);o.x=n(o);o.y=p(o); +if(o.firstChild)o.midAngle+=Math.PI;delete o.breadth;delete o.depth;delete o.ancestor;delete o.prelim;delete o.mod;delete o.change;delete o.shift;delete o.number;delete 
o.thread})}};pv.Layout.Indent=function(){pv.Layout.Hierarchy.call(this);this.link.interpolate("step-after")};pv.Layout.Indent.prototype=pv.extend(pv.Layout.Hierarchy).property("depth",Number).property("breadth",Number);pv.Layout.Indent.prototype.defaults=(new pv.Layout.Indent).extend(pv.Layout.Hierarchy.prototype.defaults).depth(15).breadth(15); +pv.Layout.Indent.prototype.buildImplied=function(b){function c(i,j,k){i.x=g+k++*f;i.y=h+j++*d;i.midAngle=0;for(i=i.firstChild;i;i=i.nextSibling)j=c(i,j,k);return j}if(!pv.Layout.Hierarchy.prototype.buildImplied.call(this,b)){var d=b.breadth,f=b.depth,g=0,h=0;c(b.nodes[0],1,1)}};pv.Layout.Pack=function(){pv.Layout.Hierarchy.call(this);this.node.radius(function(b){return b.radius}).strokeStyle("rgb(31, 119, 180)").fillStyle("rgba(31, 119, 180, .25)");this.label.textAlign("center");delete this.link}; +pv.Layout.Pack.prototype=pv.extend(pv.Layout.Hierarchy).property("spacing",Number).property("order",String);pv.Layout.Pack.prototype.defaults=(new pv.Layout.Pack).extend(pv.Layout.Hierarchy.prototype.defaults).spacing(1).order("ascending");pv.Layout.Pack.prototype.$radius=function(){return 1};pv.Layout.Pack.prototype.size=function(b){this.$radius=typeof b=="function"?function(){return Math.sqrt(b.apply(this,arguments))}:(b=Math.sqrt(b),function(){return b});return this}; +pv.Layout.Pack.prototype.buildImplied=function(b){function c(n){var p=pv.Mark.stack;p.unshift(null);for(var m=0,r=n.length;m0.0010}var t=Infinity,x=-Infinity,u=Infinity,o=-Infinity,v,w,y,z,C;v=n[0];v.x=-v.radius;v.y=0;p(v);if(n.length>1){w=n[1];w.x=w.radius;w.y=0;p(w);if(n.length>2){y=n[2];g(v,w,y);p(y);m(v,y);v.p= +y;m(y,w);w=v.n;for(var A=3;A0){r(v,z);w=z;A--}else if(D<0){r(z,w);v=z;A--}}}}v=(t+x)/2;w=(u+o)/2;for(A=y=0;An.min){n.sim.step(); +q=true}q&&d.render()},42)}else for(k=0;kg)g=j;i.size=i.firstChild?pv.sum(i.childNodes,function(k){return k.size}):c.$size.apply(c,(f[0]=i,f))});f.shift();switch(b.order){case "ascending":d.sort(function(i,j){return i.size-j.size});break;case "descending":d.sort(function(i,j){return j.size-i.size});break}var h=1/g;d.minBreadth=0;d.breadth= +0.5;d.maxBreadth=1;d.visitBefore(function(i){for(var j=i.minBreadth,k=i.maxBreadth-j,l=i.firstChild;l;l=l.nextSibling){l.minBreadth=j;l.maxBreadth=j+=l.size/i.size*k;l.breadth=(j+l.minBreadth)/2}});d.visitAfter(function(i,j){i.minDepth=(j-1)*h;i.maxDepth=i.depth=j*h});pv.Layout.Hierarchy.NodeLink.buildImplied.call(this,b)}};pv.Layout.Partition.Fill=function(){pv.Layout.Partition.call(this);pv.Layout.Hierarchy.Fill.constructor.call(this)};pv.Layout.Partition.Fill.prototype=pv.extend(pv.Layout.Partition); +pv.Layout.Partition.Fill.prototype.buildImplied=function(b){pv.Layout.Partition.prototype.buildImplied.call(this,b)||pv.Layout.Hierarchy.Fill.buildImplied.call(this,b)};pv.Layout.Arc=function(){pv.Layout.Network.call(this);var b,c,d,f=this.buildImplied;this.buildImplied=function(g){f.call(this,g);c=g.directed;b=g.orient=="radial"?"linear":"polar";d=g.orient=="right"||g.orient=="top"};this.link.data(function(g){var h=g.sourceNode;g=g.targetNode;return d!=(c||h.breadth>1)*f:null}).bottom(function(k,l){return d=="mirror"?l&1?null:(l+1>>1)*-f:(l&1||-1)*(l+1>>1)*f}).fillStyle(function(k,l){return(l&1?h:i)((l>>1)+1)});this.band.add=function(k){return b.add(pv.Panel).extend(c).add(k).extend(this)}};pv.Layout.Horizon.prototype=pv.extend(pv.Layout).property("bands",Number).property("mode",String).property("backgroundStyle",pv.color).property("positiveStyle",pv.color).property("negativeStyle",pv.color); 
+pv.Layout.Horizon.prototype.defaults=(new pv.Layout.Horizon).extend(pv.Layout.prototype.defaults).bands(2).mode("offset").backgroundStyle("white").positiveStyle("#1f77b4").negativeStyle("#d62728"); +pv.Layout.Rollup=function(){pv.Layout.Network.call(this);var b=this,c,d,f=b.buildImplied;this.buildImplied=function(g){f.call(this,g);c=g.$rollup.nodes;d=g.$rollup.links};this.node.data(function(){return c}).size(function(g){return g.nodes.length*20});this.link.interpolate("polar").eccentricity(0.8);this.link.add=function(g){return b.add(pv.Panel).data(function(){return d}).add(g).extend(this)}};pv.Layout.Rollup.prototype=pv.extend(pv.Layout.Network).property("directed",Boolean); +pv.Layout.Rollup.prototype.x=function(b){this.$x=pv.functor(b);return this};pv.Layout.Rollup.prototype.y=function(b){this.$y=pv.functor(b);return this}; +pv.Layout.Rollup.prototype.buildImplied=function(b){function c(r){return i[r]+","+j[r]}if(!pv.Layout.Network.prototype.buildImplied.call(this,b)){var d=b.nodes,f=b.links,g=b.directed,h=d.length,i=[],j=[],k=0,l={},q={},n=pv.Mark.stack,p={parent:this};n.unshift(null);for(var m=0;mk.index?k.index+","+d.index:d.index+","+k.index;(n=q[h])||(n=q[h]={sourceNode:d,targetNode:k,linkValue:0,links:[]});n.links.push(f[m]);n.linkValue+=f[m].linkValue}b.$rollup={nodes:pv.values(l),links:pv.values(q)}}}; +pv.Layout.Matrix=function(){pv.Layout.Network.call(this);var b,c,d,f,g,h=this.buildImplied;this.buildImplied=function(i){h.call(this,i);b=i.nodes.length;c=i.width/b;d=i.height/b;f=i.$matrix.labels;g=i.$matrix.pairs};this.link.data(function(){return g}).left(function(){return c*(this.index%b)}).top(function(){return d*Math.floor(this.index/b)}).width(function(){return c}).height(function(){return d}).lineWidth(1.5).strokeStyle("#fff").fillStyle(function(i){return i.linkValue?"#555":"#eee"}).parent= +this;delete this.link.add;this.label.data(function(){return f}).left(function(){return this.index&1?c*((this.index>>1)+0.5):0}).top(function(){return this.index&1?0:d*((this.index>>1)+0.5)}).textMargin(4).textAlign(function(){return this.index&1?"left":"right"}).textAngle(function(){return this.index&1?-Math.PI/2:0});delete this.node};pv.Layout.Matrix.prototype=pv.extend(pv.Layout.Network).property("directed",Boolean);pv.Layout.Matrix.prototype.sort=function(b){this.$sort=b;return this}; +pv.Layout.Matrix.prototype.buildImplied=function(b){if(!pv.Layout.Network.prototype.buildImplied.call(this,b)){var c=b.nodes,d=b.links,f=this.$sort,g=c.length,h=pv.range(g),i=[],j=[],k={};b.$matrix={labels:i,pairs:j};f&&h.sort(function(m,r){return f(c[m],c[r])});for(var l=0;lk)l=null;if(g){if(l&&g.scene==l.scene&&g.index==l.index)return;pv.Mark.dispatch("unpoint",g.scene,g.index)}if(g=l){pv.Mark.dispatch("point",l.scene,l.index);pv.listen(this.root.canvas(),"mouseout",f)}}function f(l){if(g&&!pv.ancestor(this,l.relatedTarget)){pv.Mark.dispatch("unpoint",g.scene,g.index);g=null}}var g,h=null,i=1,j=1,k=arguments.length?b*b:900;d.collapse=function(l){if(arguments.length){h=String(l);switch(h){case "y":i= +1;j=0;break;case "x":i=0;j=1;break;default:j=i=1;break}return d}return h};return d}; +pv.Behavior.select=function(){function b(j){g=this.index;f=this.scene;i=this.mouse();h=j;h.x=i.x;h.y=i.y;h.dx=h.dy=0;pv.Mark.dispatch("selectstart",f,g)}function c(){if(f){f.mark.context(f,g,function(){var 
j=this.mouse();h.x=Math.max(0,Math.min(i.x,j.x));h.y=Math.max(0,Math.min(i.y,j.y));h.dx=Math.min(this.width(),Math.max(j.x,i.x))-h.x;h.dy=Math.min(this.height(),Math.max(j.y,i.y))-h.y;this.render()});pv.Mark.dispatch("select",f,g)}}function d(){if(f){pv.Mark.dispatch("selectend",f,g);f=null}}var f, +g,h,i;pv.listen(window,"mousemove",c);pv.listen(window,"mouseup",d);return b}; +pv.Behavior.resize=function(b){function c(k){h=this.index;g=this.scene;j=this.mouse();i=k;switch(b){case "left":j.x=i.x+i.dx;break;case "right":j.x=i.x;break;case "top":j.y=i.y+i.dy;break;case "bottom":j.y=i.y;break}pv.Mark.dispatch("resizestart",g,h)}function d(){if(g){g.mark.context(g,h,function(){var k=this.mouse();i.x=Math.max(0,Math.min(j.x,k.x));i.y=Math.max(0,Math.min(j.y,k.y));i.dx=Math.min(this.parent.width(),Math.max(k.x,j.x))-i.x;i.dy=Math.min(this.parent.height(),Math.max(k.y,j.y))-i.y; +this.render()});pv.Mark.dispatch("resize",g,h)}}function f(){if(g){pv.Mark.dispatch("resizeend",g,h);g=null}}var g,h,i,j;pv.listen(window,"mousemove",d);pv.listen(window,"mouseup",f);return c}; +pv.Behavior.pan=function(){function b(){g=this.index;f=this.scene;i=pv.vector(pv.event.pageX,pv.event.pageY);h=this.transform();j=1/(h.k*this.scale);if(k)k={x:(1-h.k)*this.width(),y:(1-h.k)*this.height()}}function c(){if(f){f.mark.context(f,g,function(){var l=h.translate((pv.event.pageX-i.x)*j,(pv.event.pageY-i.y)*j);if(k){l.x=Math.max(k.x,Math.min(0,l.x));l.y=Math.max(k.y,Math.min(0,l.y))}this.transform(l).render()});pv.Mark.dispatch("pan",f,g)}}function d(){f=null}var f,g,h,i,j,k;b.bound=function(l){if(arguments.length){k= +Boolean(l);return this}return Boolean(k)};pv.listen(window,"mousemove",c);pv.listen(window,"mouseup",d);return b}; +pv.Behavior.zoom=function(b){function c(){var f=this.mouse(),g=pv.event.wheel*b;f=this.transform().translate(f.x,f.y).scale(g<0?1E3/(1E3-g):(1E3+g)/1E3).translate(-f.x,-f.y);if(d){f.k=Math.max(1,f.k);f.x=Math.max((1-f.k)*this.width(),Math.min(0,f.x));f.y=Math.max((1-f.k)*this.height(),Math.min(0,f.y))}this.transform(f).render();pv.Mark.dispatch("zoom",this.scene,this.index)}var d;arguments.length||(b=1/48);c.bound=function(f){if(arguments.length){d=Boolean(f);return this}return Boolean(d)};return c}; +pv.Geo=function(){}; +pv.Geo.projections={mercator:{project:function(b){return{x:b.lng/180,y:b.lat>85?1:b.lat<-85?-1:Math.log(Math.tan(Math.PI/4+pv.radians(b.lat)/2))/Math.PI}},invert:function(b){return{lng:b.x*180,lat:pv.degrees(2*Math.atan(Math.exp(b.y*Math.PI))-Math.PI/2)}}},"gall-peters":{project:function(b){return{x:b.lng/180,y:Math.sin(pv.radians(b.lat))}},invert:function(b){return{lng:b.x*180,lat:pv.degrees(Math.asin(b.y))}}},sinusoidal:{project:function(b){return{x:pv.radians(b.lng)*Math.cos(pv.radians(b.lat))/Math.PI, +y:b.lat/90}},invert:function(b){return{lng:pv.degrees(b.x*Math.PI/Math.cos(b.y*Math.PI/2)),lat:b.y*90}}},aitoff:{project:function(b){var c=pv.radians(b.lng);b=pv.radians(b.lat);var d=Math.acos(Math.cos(b)*Math.cos(c/2));return{x:2*(d?Math.cos(b)*Math.sin(c/2)*d/Math.sin(d):0)/Math.PI,y:2*(d?Math.sin(b)*d/Math.sin(d):0)/Math.PI}},invert:function(b){var c=b.y*Math.PI/2;return{lng:pv.degrees(b.x*Math.PI/2/Math.cos(c)),lat:pv.degrees(c)}}},hammer:{project:function(b){var c=pv.radians(b.lng);b=pv.radians(b.lat); +var d=Math.sqrt(1+Math.cos(b)*Math.cos(c/2));return{x:2*Math.SQRT2*Math.cos(b)*Math.sin(c/2)/d/3,y:Math.SQRT2*Math.sin(b)/d/1.5}},invert:function(b){var c=b.x*3;b=b.y*1.5;var 
d=Math.sqrt(1-c*c/16-b*b/4);return{lng:pv.degrees(2*Math.atan2(d*c,2*(2*d*d-1))),lat:pv.degrees(Math.asin(d*b))}}},identity:{project:function(b){return{x:b.lng/180,y:b.lat/90}},invert:function(b){return{lng:b.x*180,lat:b.y*90}}}}; +pv.Geo.scale=function(b){function c(m){if(!n||m.lng!=n.lng||m.lat!=n.lat){n=m;m=d(m);p={x:k(m.x),y:l(m.y)}}return p}function d(m){return j.project({lng:m.lng-q.lng,lat:m.lat})}function f(m){m=j.invert(m);m.lng+=q.lng;return m}var g={x:0,y:0},h={x:1,y:1},i=[],j=pv.Geo.projections.identity,k=pv.Scale.linear(-1,1).range(0,1),l=pv.Scale.linear(-1,1).range(1,0),q={lng:0,lat:0},n,p;c.x=function(m){return c(m).x};c.y=function(m){return c(m).y};c.ticks={lng:function(m){var r;if(i.length>1){var s=pv.Scale.linear(); +if(m==undefined)m=10;r=s.domain(i,function(t){return t.lat}).ticks(m);m=s.domain(i,function(t){return t.lng}).ticks(m)}else{r=pv.range(-80,81,10);m=pv.range(-180,181,10)}return m.map(function(t){return r.map(function(x){return{lat:x,lng:t}})})},lat:function(m){return pv.transpose(c.ticks.lng(m))}};c.invert=function(m){return f({x:k.invert(m.x),y:l.invert(m.y)})};c.domain=function(m,r){if(arguments.length){i=m instanceof Array?arguments.length>1?pv.map(m,r):m:Array.prototype.slice.call(arguments); +if(i.length>1){var s=i.map(function(x){return x.lng}),t=i.map(function(x){return x.lat});q={lng:(pv.max(s)+pv.min(s))/2,lat:(pv.max(t)+pv.min(t))/2};s=i.map(d);k.domain(s,function(x){return x.x});l.domain(s,function(x){return x.y})}else{q={lng:0,lat:0};k.domain(-1,1);l.domain(-1,1)}n=null;return this}return i};c.range=function(m,r){if(arguments.length){if(typeof m=="object"){g={x:Number(m.x),y:Number(m.y)};h={x:Number(r.x),y:Number(r.y)}}else{g={x:0,y:0};h={x:Number(m),y:Number(r)}}k.range(g.x,h.x); +l.range(h.y,g.y);n=null;return this}return[g,h]};c.projection=function(m){if(arguments.length){j=typeof m=="string"?pv.Geo.projections[m]||pv.Geo.projections.identity:m;return this.domain(i)}return m};c.by=function(m){function r(){return c(m.apply(this,arguments))}for(var s in c)r[s]=c[s];return r};arguments.length&&c.projection(b);return c}; diff --git a/custom_nodes/ComfyUI-KJNodes-main/kjweb_async/purify.min.js b/custom_nodes/ComfyUI-KJNodes-main/kjweb_async/purify.min.js new file mode 100644 index 0000000000000000000000000000000000000000..c2f5164618eebcc44b0186f594ccb8092639c670 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/kjweb_async/purify.min.js @@ -0,0 +1,3 @@ +/*! 
@license DOMPurify 3.0.11 | (c) Cure53 and other contributors | Released under the Apache license 2.0 and Mozilla Public License 2.0 | github.com/cure53/DOMPurify/blob/3.0.11/LICENSE */ +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e="undefined"!=typeof globalThis?globalThis:e||self).DOMPurify=t()}(this,(function(){"use strict";const{entries:e,setPrototypeOf:t,isFrozen:n,getPrototypeOf:o,getOwnPropertyDescriptor:r}=Object;let{freeze:i,seal:a,create:l}=Object,{apply:c,construct:s}="undefined"!=typeof Reflect&&Reflect;i||(i=function(e){return e}),a||(a=function(e){return e}),c||(c=function(e,t,n){return e.apply(t,n)}),s||(s=function(e,t){return new e(...t)});const u=b(Array.prototype.forEach),m=b(Array.prototype.pop),p=b(Array.prototype.push),f=b(String.prototype.toLowerCase),d=b(String.prototype.toString),h=b(String.prototype.match),g=b(String.prototype.replace),T=b(String.prototype.indexOf),y=b(String.prototype.trim),E=b(Object.prototype.hasOwnProperty),A=b(RegExp.prototype.test),_=(N=TypeError,function(){for(var e=arguments.length,t=new Array(e),n=0;n1?n-1:0),r=1;r2&&void 0!==arguments[2]?arguments[2]:f;t&&t(e,null);let i=o.length;for(;i--;){let t=o[i];if("string"==typeof t){const e=r(t);e!==t&&(n(o)||(o[i]=e),t=e)}e[t]=!0}return e}function R(e){for(let t=0;t/gm),B=a(/\${[\w\W]*}/gm),W=a(/^data-[\-\w.\u00B7-\uFFFF]/),G=a(/^aria-[\-\w]+$/),Y=a(/^(?:(?:(?:f|ht)tps?|mailto|tel|callto|sms|cid|xmpp):|[^a-z]|[a-z+.\-]+(?:[^a-z+.\-:]|$))/i),j=a(/^(?:\w+script|data):/i),X=a(/[\u0000-\u0020\u00A0\u1680\u180E\u2000-\u2029\u205F\u3000]/g),q=a(/^html$/i),$=a(/^[a-z][.\w]*(-[.\w]+)+$/i);var K=Object.freeze({__proto__:null,MUSTACHE_EXPR:H,ERB_EXPR:z,TMPLIT_EXPR:B,DATA_ATTR:W,ARIA_ATTR:G,IS_ALLOWED_URI:Y,IS_SCRIPT_OR_DATA:j,ATTR_WHITESPACE:X,DOCTYPE_NAME:q,CUSTOM_ELEMENT:$});const V=function(){return"undefined"==typeof window?null:window},Z=function(e,t){if("object"!=typeof e||"function"!=typeof e.createPolicy)return null;let n=null;const o="data-tt-policy-suffix";t&&t.hasAttribute(o)&&(n=t.getAttribute(o));const r="dompurify"+(n?"#"+n:"");try{return e.createPolicy(r,{createHTML:e=>e,createScriptURL:e=>e})}catch(e){return console.warn("TrustedTypes policy "+r+" could not be created."),null}};var J=function t(){let n=arguments.length>0&&void 0!==arguments[0]?arguments[0]:V();const o=e=>t(e);if(o.version="3.0.11",o.removed=[],!n||!n.document||9!==n.document.nodeType)return o.isSupported=!1,o;let{document:r}=n;const a=r,c=a.currentScript,{DocumentFragment:s,HTMLTemplateElement:N,Node:b,Element:R,NodeFilter:H,NamedNodeMap:z=n.NamedNodeMap||n.MozNamedAttrMap,HTMLFormElement:B,DOMParser:W,trustedTypes:G}=n,j=R.prototype,X=L(j,"cloneNode"),$=L(j,"nextSibling"),J=L(j,"childNodes"),Q=L(j,"parentNode");if("function"==typeof N){const e=r.createElement("template");e.content&&e.content.ownerDocument&&(r=e.content.ownerDocument)}let ee,te="";const{implementation:ne,createNodeIterator:oe,createDocumentFragment:re,getElementsByTagName:ie}=r,{importNode:ae}=a;let le={};o.isSupported="function"==typeof e&&"function"==typeof Q&&ne&&void 0!==ne.createHTMLDocument;const{MUSTACHE_EXPR:ce,ERB_EXPR:se,TMPLIT_EXPR:ue,DATA_ATTR:me,ARIA_ATTR:pe,IS_SCRIPT_OR_DATA:fe,ATTR_WHITESPACE:de,CUSTOM_ELEMENT:he}=K;let{IS_ALLOWED_URI:ge}=K,Te=null;const ye=S({},[...D,...C,...O,...v,...M]);let Ee=null;const Ae=S({},[...I,...U,...P,...F]);let 
_e=Object.seal(l(null,{tagNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},attributeNameCheck:{writable:!0,configurable:!1,enumerable:!0,value:null},allowCustomizedBuiltInElements:{writable:!0,configurable:!1,enumerable:!0,value:!1}})),Ne=null,be=null,Se=!0,Re=!0,we=!1,Le=!0,De=!1,Ce=!0,Oe=!1,xe=!1,ve=!1,ke=!1,Me=!1,Ie=!1,Ue=!0,Pe=!1;const Fe="user-content-";let He=!0,ze=!1,Be={},We=null;const Ge=S({},["annotation-xml","audio","colgroup","desc","foreignobject","head","iframe","math","mi","mn","mo","ms","mtext","noembed","noframes","noscript","plaintext","script","style","svg","template","thead","title","video","xmp"]);let Ye=null;const je=S({},["audio","video","img","source","image","track"]);let Xe=null;const qe=S({},["alt","class","for","id","label","name","pattern","placeholder","role","summary","title","value","style","xmlns"]),$e="http://www.w3.org/1998/Math/MathML",Ke="http://www.w3.org/2000/svg",Ve="http://www.w3.org/1999/xhtml";let Ze=Ve,Je=!1,Qe=null;const et=S({},[$e,Ke,Ve],d);let tt=null;const nt=["application/xhtml+xml","text/html"],ot="text/html";let rt=null,it=null;const at=r.createElement("form"),lt=function(e){return e instanceof RegExp||e instanceof Function},ct=function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};if(!it||it!==e){if(e&&"object"==typeof e||(e={}),e=w(e),tt=-1===nt.indexOf(e.PARSER_MEDIA_TYPE)?ot:e.PARSER_MEDIA_TYPE,rt="application/xhtml+xml"===tt?d:f,Te=E(e,"ALLOWED_TAGS")?S({},e.ALLOWED_TAGS,rt):ye,Ee=E(e,"ALLOWED_ATTR")?S({},e.ALLOWED_ATTR,rt):Ae,Qe=E(e,"ALLOWED_NAMESPACES")?S({},e.ALLOWED_NAMESPACES,d):et,Xe=E(e,"ADD_URI_SAFE_ATTR")?S(w(qe),e.ADD_URI_SAFE_ATTR,rt):qe,Ye=E(e,"ADD_DATA_URI_TAGS")?S(w(je),e.ADD_DATA_URI_TAGS,rt):je,We=E(e,"FORBID_CONTENTS")?S({},e.FORBID_CONTENTS,rt):Ge,Ne=E(e,"FORBID_TAGS")?S({},e.FORBID_TAGS,rt):{},be=E(e,"FORBID_ATTR")?S({},e.FORBID_ATTR,rt):{},Be=!!E(e,"USE_PROFILES")&&e.USE_PROFILES,Se=!1!==e.ALLOW_ARIA_ATTR,Re=!1!==e.ALLOW_DATA_ATTR,we=e.ALLOW_UNKNOWN_PROTOCOLS||!1,Le=!1!==e.ALLOW_SELF_CLOSE_IN_ATTR,De=e.SAFE_FOR_TEMPLATES||!1,Ce=!1!==e.SAFE_FOR_XML,Oe=e.WHOLE_DOCUMENT||!1,ke=e.RETURN_DOM||!1,Me=e.RETURN_DOM_FRAGMENT||!1,Ie=e.RETURN_TRUSTED_TYPE||!1,ve=e.FORCE_BODY||!1,Ue=!1!==e.SANITIZE_DOM,Pe=e.SANITIZE_NAMED_PROPS||!1,He=!1!==e.KEEP_CONTENT,ze=e.IN_PLACE||!1,ge=e.ALLOWED_URI_REGEXP||Y,Ze=e.NAMESPACE||Ve,_e=e.CUSTOM_ELEMENT_HANDLING||{},e.CUSTOM_ELEMENT_HANDLING&<(e.CUSTOM_ELEMENT_HANDLING.tagNameCheck)&&(_e.tagNameCheck=e.CUSTOM_ELEMENT_HANDLING.tagNameCheck),e.CUSTOM_ELEMENT_HANDLING&<(e.CUSTOM_ELEMENT_HANDLING.attributeNameCheck)&&(_e.attributeNameCheck=e.CUSTOM_ELEMENT_HANDLING.attributeNameCheck),e.CUSTOM_ELEMENT_HANDLING&&"boolean"==typeof e.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements&&(_e.allowCustomizedBuiltInElements=e.CUSTOM_ELEMENT_HANDLING.allowCustomizedBuiltInElements),De&&(Re=!1),Me&&(ke=!0),Be&&(Te=S({},M),Ee=[],!0===Be.html&&(S(Te,D),S(Ee,I)),!0===Be.svg&&(S(Te,C),S(Ee,U),S(Ee,F)),!0===Be.svgFilters&&(S(Te,O),S(Ee,U),S(Ee,F)),!0===Be.mathMl&&(S(Te,v),S(Ee,P),S(Ee,F))),e.ADD_TAGS&&(Te===ye&&(Te=w(Te)),S(Te,e.ADD_TAGS,rt)),e.ADD_ATTR&&(Ee===Ae&&(Ee=w(Ee)),S(Ee,e.ADD_ATTR,rt)),e.ADD_URI_SAFE_ATTR&&S(Xe,e.ADD_URI_SAFE_ATTR,rt),e.FORBID_CONTENTS&&(We===Ge&&(We=w(We)),S(We,e.FORBID_CONTENTS,rt)),He&&(Te["#text"]=!0),Oe&&S(Te,["html","head","body"]),Te.table&&(S(Te,["tbody"]),delete Ne.tbody),e.TRUSTED_TYPES_POLICY){if("function"!=typeof e.TRUSTED_TYPES_POLICY.createHTML)throw _('TRUSTED_TYPES_POLICY configuration option must provide a "createHTML" 
hook.');if("function"!=typeof e.TRUSTED_TYPES_POLICY.createScriptURL)throw _('TRUSTED_TYPES_POLICY configuration option must provide a "createScriptURL" hook.');ee=e.TRUSTED_TYPES_POLICY,te=ee.createHTML("")}else void 0===ee&&(ee=Z(G,c)),null!==ee&&"string"==typeof te&&(te=ee.createHTML(""));i&&i(e),it=e}},st=S({},["mi","mo","mn","ms","mtext"]),ut=S({},["foreignobject","desc","title","annotation-xml"]),mt=S({},["title","style","font","a","script"]),pt=S({},[...C,...O,...x]),ft=S({},[...v,...k]),dt=function(e){let t=Q(e);t&&t.tagName||(t={namespaceURI:Ze,tagName:"template"});const n=f(e.tagName),o=f(t.tagName);return!!Qe[e.namespaceURI]&&(e.namespaceURI===Ke?t.namespaceURI===Ve?"svg"===n:t.namespaceURI===$e?"svg"===n&&("annotation-xml"===o||st[o]):Boolean(pt[n]):e.namespaceURI===$e?t.namespaceURI===Ve?"math"===n:t.namespaceURI===Ke?"math"===n&&ut[o]:Boolean(ft[n]):e.namespaceURI===Ve?!(t.namespaceURI===Ke&&!ut[o])&&(!(t.namespaceURI===$e&&!st[o])&&(!ft[n]&&(mt[n]||!pt[n]))):!("application/xhtml+xml"!==tt||!Qe[e.namespaceURI]))},ht=function(e){p(o.removed,{element:e});try{e.parentNode.removeChild(e)}catch(t){e.remove()}},gt=function(e,t){try{p(o.removed,{attribute:t.getAttributeNode(e),from:t})}catch(e){p(o.removed,{attribute:null,from:t})}if(t.removeAttribute(e),"is"===e&&!Ee[e])if(ke||Me)try{ht(t)}catch(e){}else try{t.setAttribute(e,"")}catch(e){}},Tt=function(e){let t=null,n=null;if(ve)e=""+e;else{const t=h(e,/^[\r\n\t ]+/);n=t&&t[0]}"application/xhtml+xml"===tt&&Ze===Ve&&(e=''+e+"");const o=ee?ee.createHTML(e):e;if(Ze===Ve)try{t=(new W).parseFromString(o,tt)}catch(e){}if(!t||!t.documentElement){t=ne.createDocument(Ze,"template",null);try{t.documentElement.innerHTML=Je?te:o}catch(e){}}const i=t.body||t.documentElement;return e&&n&&i.insertBefore(r.createTextNode(n),i.childNodes[0]||null),Ze===Ve?ie.call(t,Oe?"html":"body")[0]:Oe?t.documentElement:i},yt=function(e){return oe.call(e.ownerDocument||e,e,H.SHOW_ELEMENT|H.SHOW_COMMENT|H.SHOW_TEXT|H.SHOW_PROCESSING_INSTRUCTION|H.SHOW_CDATA_SECTION,null)},Et=function(e){return e instanceof B&&("string"!=typeof e.nodeName||"string"!=typeof e.textContent||"function"!=typeof e.removeChild||!(e.attributes instanceof z)||"function"!=typeof e.removeAttribute||"function"!=typeof e.setAttribute||"string"!=typeof e.namespaceURI||"function"!=typeof e.insertBefore||"function"!=typeof e.hasChildNodes)},At=function(e){return"function"==typeof b&&e instanceof b},_t=function(e,t,n){le[e]&&u(le[e],(e=>{e.call(o,t,n,it)}))},Nt=function(e){let t=null;if(_t("beforeSanitizeElements",e,null),Et(e))return ht(e),!0;const n=rt(e.nodeName);if(_t("uponSanitizeElement",e,{tagName:n,allowedTags:Te}),e.hasChildNodes()&&!At(e.firstElementChild)&&A(/<[/\w]/g,e.innerHTML)&&A(/<[/\w]/g,e.textContent))return ht(e),!0;if(7===e.nodeType)return ht(e),!0;if(Ce&&8===e.nodeType&&A(/<[/\w]/g,e.data))return ht(e),!0;if(!Te[n]||Ne[n]){if(!Ne[n]&&St(n)){if(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,n))return!1;if(_e.tagNameCheck instanceof Function&&_e.tagNameCheck(n))return!1}if(He&&!We[n]){const t=Q(e)||e.parentNode,n=J(e)||e.childNodes;if(n&&t){for(let o=n.length-1;o>=0;--o)t.insertBefore(X(n[o],!0),$(e))}}return ht(e),!0}return e instanceof R&&!dt(e)?(ht(e),!0):"noscript"!==n&&"noembed"!==n&&"noframes"!==n||!A(/<\/no(script|embed|frames)/i,e.innerHTML)?(De&&3===e.nodeType&&(t=e.textContent,u([ce,se,ue],(e=>{t=g(t,e," 
")})),e.textContent!==t&&(p(o.removed,{element:e.cloneNode()}),e.textContent=t)),_t("afterSanitizeElements",e,null),!1):(ht(e),!0)},bt=function(e,t,n){if(Ue&&("id"===t||"name"===t)&&(n in r||n in at))return!1;if(Re&&!be[t]&&A(me,t));else if(Se&&A(pe,t));else if(!Ee[t]||be[t]){if(!(St(e)&&(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,e)||_e.tagNameCheck instanceof Function&&_e.tagNameCheck(e))&&(_e.attributeNameCheck instanceof RegExp&&A(_e.attributeNameCheck,t)||_e.attributeNameCheck instanceof Function&&_e.attributeNameCheck(t))||"is"===t&&_e.allowCustomizedBuiltInElements&&(_e.tagNameCheck instanceof RegExp&&A(_e.tagNameCheck,n)||_e.tagNameCheck instanceof Function&&_e.tagNameCheck(n))))return!1}else if(Xe[t]);else if(A(ge,g(n,de,"")));else if("src"!==t&&"xlink:href"!==t&&"href"!==t||"script"===e||0!==T(n,"data:")||!Ye[e]){if(we&&!A(fe,g(n,de,"")));else if(n)return!1}else;return!0},St=function(e){return"annotation-xml"!==e&&h(e,he)},Rt=function(e){_t("beforeSanitizeAttributes",e,null);const{attributes:t}=e;if(!t)return;const n={attrName:"",attrValue:"",keepAttr:!0,allowedAttributes:Ee};let r=t.length;for(;r--;){const i=t[r],{name:a,namespaceURI:l,value:c}=i,s=rt(a);let p="value"===a?c:y(c);if(n.attrName=s,n.attrValue=p,n.keepAttr=!0,n.forceKeepAttr=void 0,_t("uponSanitizeAttribute",e,n),p=n.attrValue,n.forceKeepAttr)continue;if(gt(a,e),!n.keepAttr)continue;if(!Le&&A(/\/>/i,p)){gt(a,e);continue}De&&u([ce,se,ue],(e=>{p=g(p,e," ")}));const f=rt(e.nodeName);if(bt(f,s,p)){if(!Pe||"id"!==s&&"name"!==s||(gt(a,e),p=Fe+p),ee&&"object"==typeof G&&"function"==typeof G.getAttributeType)if(l);else switch(G.getAttributeType(f,s)){case"TrustedHTML":p=ee.createHTML(p);break;case"TrustedScriptURL":p=ee.createScriptURL(p)}try{l?e.setAttributeNS(l,a,p):e.setAttribute(a,p),m(o.removed)}catch(e){}}}_t("afterSanitizeAttributes",e,null)},wt=function e(t){let n=null;const o=yt(t);for(_t("beforeSanitizeShadowDOM",t,null);n=o.nextNode();)_t("uponSanitizeShadowNode",n,null),Nt(n)||(n.content instanceof s&&e(n.content),Rt(n));_t("afterSanitizeShadowDOM",t,null)};return o.sanitize=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=null,r=null,i=null,l=null;if(Je=!e,Je&&(e="\x3c!--\x3e"),"string"!=typeof e&&!At(e)){if("function"!=typeof e.toString)throw _("toString is not a function");if("string"!=typeof(e=e.toString()))throw _("dirty is not a string, aborting")}if(!o.isSupported)return e;if(xe||ct(t),o.removed=[],"string"==typeof e&&(ze=!1),ze){if(e.nodeName){const t=rt(e.nodeName);if(!Te[t]||Ne[t])throw _("root node is forbidden and cannot be sanitized in-place")}}else if(e instanceof b)n=Tt("\x3c!----\x3e"),r=n.ownerDocument.importNode(e,!0),1===r.nodeType&&"BODY"===r.nodeName||"HTML"===r.nodeName?n=r:n.appendChild(r);else{if(!ke&&!De&&!Oe&&-1===e.indexOf("<"))return ee&&Ie?ee.createHTML(e):e;if(n=Tt(e),!n)return ke?null:Ie?te:""}n&&ve&&ht(n.firstChild);const c=yt(ze?e:n);for(;i=c.nextNode();)Nt(i)||(i.content instanceof s&&wt(i.content),Rt(i));if(ze)return e;if(ke){if(Me)for(l=re.call(n.ownerDocument);n.firstChild;)l.appendChild(n.firstChild);else l=n;return(Ee.shadowroot||Ee.shadowrootmode)&&(l=ae.call(a,l,!0)),l}let m=Oe?n.outerHTML:n.innerHTML;return Oe&&Te["!doctype"]&&n.ownerDocument&&n.ownerDocument.doctype&&n.ownerDocument.doctype.name&&A(q,n.ownerDocument.doctype.name)&&(m="\n"+m),De&&u([ce,se,ue],(e=>{m=g(m,e," ")})),ee&&Ie?ee.createHTML(m):m},o.setConfig=function(){let e=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:{};ct(e),xe=!0},o.clearConfig=function(){it=null,xe=!1},o.isValidAttribute=function(e,t,n){it||ct({});const o=rt(e),r=rt(t);return bt(o,r,n)},o.addHook=function(e,t){"function"==typeof t&&(le[e]=le[e]||[],p(le[e],t))},o.removeHook=function(e){if(le[e])return m(le[e])},o.removeHooks=function(e){le[e]&&(le[e]=[])},o.removeAllHooks=function(){le={}},o}();return J})); +//# sourceMappingURL=purify.min.js.map diff --git a/custom_nodes/ComfyUI-KJNodes-main/kjweb_async/svg-path-properties.min.js b/custom_nodes/ComfyUI-KJNodes-main/kjweb_async/svg-path-properties.min.js new file mode 100644 index 0000000000000000000000000000000000000000..88d47e0de4c54f881083164c20045a7e8b621caf --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/kjweb_async/svg-path-properties.min.js @@ -0,0 +1,2 @@ +// http://geoexamples.com/path-properties/ v1.2.0 Copyright 2023 Roger Veciana i Rovira +!function(t,n){"object"==typeof exports&&"undefined"!=typeof module?n(exports):"function"==typeof define&&define.amd?define(["exports"],n):n((t="undefined"!=typeof globalThis?globalThis:t||self).svgPathProperties={})}(this,(function(t){"use strict";function n(t,n){for(var e=0;et.length)&&(n=t.length);for(var e=0,i=new Array(n);eu.length&&(t=u.length);var n=f({x:u.x0,y:u.y0},u.rx,u.ry,u.xAxisRotate,u.LargeArcFlag,u.SweepFlag,{x:u.x1,y:u.y1},t/u.length);return{x:n.x,y:n.y}})),i(this,"getTangentAtLength",(function(t){t<0?t=0:t>u.length&&(t=u.length);var n,e=.05,i=u.getPointAtLength(t);t<0?t=0:t>u.length&&(t=u.length);var r=(n=t1&&(n=Math.sqrt(c)*n,e=Math.sqrt(c)*e);var f=(Math.pow(n,2)*Math.pow(e,2)-Math.pow(n,2)*Math.pow(l.y,2)-Math.pow(e,2)*Math.pow(l.x,2))/(Math.pow(n,2)*Math.pow(l.y,2)+Math.pow(e,2)*Math.pow(l.x,2));f=f<0?0:f;var y=(r!==h?1:-1)*Math.sqrt(f),v=y*(n*l.y/e),M=y*(-e*l.x/n),L={x:Math.cos(o)*v-Math.sin(o)*M+(t.x+s.x)/2,y:Math.sin(o)*v+Math.cos(o)*M+(t.y+s.y)/2},d={x:(l.x-v)/n,y:(l.y-M)/e},A=w({x:1,y:0},d),b=w(d,{x:(-l.x-v)/n,y:(-l.y-M)/e});!h&&b>0?b-=2*Math.PI:h&&b<0&&(b+=2*Math.PI);var P=A+(b%=2*Math.PI)*a,m=n*Math.cos(P),T=e*Math.sin(P);return{x:Math.cos(o)*m-Math.sin(o)*T+L.x,y:Math.sin(o)*m+Math.cos(o)*T+L.y,ellipticalArcStartAngle:A,ellipticalArcEndAngle:A+b,ellipticalArcAngle:P,ellipticalArcCenter:L,resultantRx:n,resultantRy:e}},y=function(t,n){t=t||500;for(var e,i=0,r=[],h=[],s=n(0),a=0;a0?Math.sqrt(l*l+c):0,y=u*u+c>0?Math.sqrt(u*u+c):0,p=u+Math.sqrt(u*u+c)!==0&&(l+f)/(u+y)!=0?c*Math.log(Math.abs((l+f)/(u+y))):0;return Math.sqrt(a)/2*(l*f-u*y+p)},_=function(t,n,e){return{x:2*(1-e)*(t[1]-t[0])+2*e*(t[2]-t[1]),y:2*(1-e)*(n[1]-n[0])+2*e*(n[2]-n[1])}};function S(t,n,e){var i=N(1,e,t),r=N(1,e,n),h=i*i+r*r;return Math.sqrt(h)}var N=function t(n,e,i){var r,h,s=i.length-1;if(0===s)return 0;if(0===n){h=0;for(var a=0;a<=s;a++)h+=A[s][a]*Math.pow(1-e,s-a)*Math.pow(e,a)*i[a];return h}r=new Array(s);for(var o=0;o.001;){var a=e(r+h),o=Math.abs(t-a)/n;if(o500)break}return r},j=e((function(t,n,e,r,h,s,a,o){var g=this;i(this,"a",void 0),i(this,"b",void 0),i(this,"c",void 0),i(this,"d",void 0),i(this,"length",void 0),i(this,"getArcLength",void 0),i(this,"getPoint",void 0),i(this,"getDerivative",void 0),i(this,"getTotalLength",(function(){return g.length})),i(this,"getPointAtLength",(function(t){var n=[g.a.x,g.b.x,g.c.x,g.d.x],e=[g.a.y,g.b.y,g.c.y,g.d.y],i=C(t,g.length,(function(t){return g.getArcLength(n,e,t)}));return g.getPoint(n,e,i)})),i(this,"getTangentAtLength",(function(t){var n=[g.a.x,g.b.x,g.c.x,g.d.x],e=[g.a.y,g.b.y,g.c.y,g.d.y],i=C(t,g.length,(function(t){return 
g.getArcLength(n,e,t)})),r=g.getDerivative(n,e,i),h=Math.sqrt(r.x*r.x+r.y*r.y);return h>0?{x:r.x/h,y:r.y/h}:{x:0,y:0}})),i(this,"getPropertiesAtLength",(function(t){var n,e=[g.a.x,g.b.x,g.c.x,g.d.x],i=[g.a.y,g.b.y,g.c.y,g.d.y],r=C(t,g.length,(function(t){return g.getArcLength(e,i,t)})),h=g.getDerivative(e,i,r),s=Math.sqrt(h.x*h.x+h.y*h.y);n=s>0?{x:h.x/s,y:h.y/s}:{x:0,y:0};var a=g.getPoint(e,i,r);return{x:a.x,y:a.y,tangentX:n.x,tangentY:n.y}})),i(this,"getC",(function(){return g.c})),i(this,"getD",(function(){return g.d})),this.a={x:t,y:n},this.b={x:e,y:r},this.c={x:h,y:s},void 0!==a&&void 0!==o?(this.getArcLength=m,this.getPoint=b,this.getDerivative=P,this.d={x:a,y:o}):(this.getArcLength=q,this.getPoint=T,this.getDerivative=_,this.d={x:0,y:0}),this.length=this.getArcLength([this.a.x,this.b.x,this.c.x,this.d.x],[this.a.y,this.b.y,this.c.y,this.d.y],1)})),O=e((function(t){var n=this;i(this,"length",0),i(this,"partial_lengths",[]),i(this,"functions",[]),i(this,"initial_point",null),i(this,"getPartAtLength",(function(t){t<0?t=0:t>n.length&&(t=n.length);for(var e=n.partial_lengths.length-1;n.partial_lengths[e]>=t&&e>0;)e--;return e++,{fraction:t-n.partial_lengths[e-1],i:e}})),i(this,"getTotalLength",(function(){return n.length})),i(this,"getPointAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getPointAtLength(e.fraction);if(n.initial_point)return n.initial_point;throw new Error("Wrong function at this part.")})),i(this,"getTangentAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getTangentAtLength(e.fraction);if(n.initial_point)return{x:0,y:0};throw new Error("Wrong function at this part.")})),i(this,"getPropertiesAtLength",(function(t){var e=n.getPartAtLength(t),i=n.functions[e.i];if(i)return i.getPropertiesAtLength(e.fraction);if(n.initial_point)return{x:n.initial_point.x,y:n.initial_point.y,tangentX:0,tangentY:0};throw new Error("Wrong function at this part.")})),i(this,"getParts",(function(){for(var t=[],e=0;e0?t:"M0,0").match(o);if(!n)throw new Error("No path elements found in string ".concat(t));return n.reduce((function(t,n){var e=n.charAt(0),i=e.toLowerCase(),h=u(n.substring(1));if("m"===i&&h.length>2&&(t.push([e].concat(r(h.splice(0,2)))),i="l",e="m"===e?"l":"L"),"a"===i.toLowerCase()&&(5===h.length||6===h.length)){var s=n.substring(1).trim().split(" ");h=[Number(s[0]),Number(s[1]),Number(s[2]),Number(s[3].charAt(0)),Number(s[3].charAt(1)),Number(s[3].substring(2)),Number(s[4])]}for(;h.length>=0;){if(h.length===a[i]){t.push([e].concat(r(h.splice(0,a[i]))));break}if(h.length0?(this.length+=e.getTotalLength(),this.functions.push(e),s=[h[y][5]+s[0],h[y][6]+s[1]]):this.functions.push(new l(s[0],s[0],s[1],s[1]));else if("S"===h[y][0]){if(y>0&&["C","c","S","s"].indexOf(h[y-1][0])>-1){if(e){var p=e.getC();e=new j(s[0],s[1],2*s[0]-p.x,2*s[1]-p.y,h[y][1],h[y][2],h[y][3],h[y][4])}}else e=new j(s[0],s[1],s[0],s[1],h[y][1],h[y][2],h[y][3],h[y][4]);e&&(this.length+=e.getTotalLength(),s=[h[y][3],h[y][4]],this.functions.push(e))}else if("s"===h[y][0]){if(y>0&&["C","c","S","s"].indexOf(h[y-1][0])>-1){if(e){var x=e.getC(),v=e.getD();e=new j(s[0],s[1],s[0]+v.x-x.x,s[1]+v.y-x.y,s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4])}}else e=new j(s[0],s[1],s[0],s[1],s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4]);e&&(this.length+=e.getTotalLength(),s=[h[y][3]+s[0],h[y][4]+s[1]],this.functions.push(e))}else if("Q"===h[y][0]){if(s[0]==h[y][1]&&s[1]==h[y][2]){var M=new 
l(h[y][1],h[y][3],h[y][2],h[y][4]);this.length+=M.getTotalLength(),this.functions.push(M)}else e=new j(s[0],s[1],h[y][1],h[y][2],h[y][3],h[y][4],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);s=[h[y][3],h[y][4]],g=[h[y][1],h[y][2]]}else if("q"===h[y][0]){if(0!=h[y][1]||0!=h[y][2])e=new j(s[0],s[1],s[0]+h[y][1],s[1]+h[y][2],s[0]+h[y][3],s[1]+h[y][4],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);else{var w=new l(s[0]+h[y][1],s[0]+h[y][3],s[1]+h[y][2],s[1]+h[y][4]);this.length+=w.getTotalLength(),this.functions.push(w)}g=[s[0]+h[y][1],s[1]+h[y][2]],s=[h[y][3]+s[0],h[y][4]+s[1]]}else if("T"===h[y][0]){if(y>0&&["Q","q","T","t"].indexOf(h[y-1][0])>-1)e=new j(s[0],s[1],2*s[0]-g[0],2*s[1]-g[1],h[y][1],h[y][2],void 0,void 0),this.functions.push(e),this.length+=e.getTotalLength();else{var L=new l(s[0],h[y][1],s[1],h[y][2]);this.functions.push(L),this.length+=L.getTotalLength()}g=[2*s[0]-g[0],2*s[1]-g[1]],s=[h[y][1],h[y][2]]}else if("t"===h[y][0]){if(y>0&&["Q","q","T","t"].indexOf(h[y-1][0])>-1)e=new j(s[0],s[1],2*s[0]-g[0],2*s[1]-g[1],s[0]+h[y][1],s[1]+h[y][2],void 0,void 0),this.length+=e.getTotalLength(),this.functions.push(e);else{var d=new l(s[0],s[0]+h[y][1],s[1],s[1]+h[y][2]);this.length+=d.getTotalLength(),this.functions.push(d)}g=[2*s[0]-g[0],2*s[1]-g[1]],s=[h[y][1]+s[0],h[y][2]+s[1]]}else if("A"===h[y][0]){var A=new c(s[0],s[1],h[y][1],h[y][2],h[y][3],1===h[y][4],1===h[y][5],h[y][6],h[y][7]);this.length+=A.getTotalLength(),s=[h[y][6],h[y][7]],this.functions.push(A)}else if("a"===h[y][0]){var b=new c(s[0],s[1],h[y][1],h[y][2],h[y][3],1===h[y][4],1===h[y][5],s[0]+h[y][6],s[1]+h[y][7]);this.length+=b.getTotalLength(),s=[s[0]+h[y][6],s[1]+h[y][7]],this.functions.push(b)}this.partial_lengths.push(this.length)}})),E=e((function(t){var n=this;if(i(this,"inst",void 0),i(this,"getTotalLength",(function(){return n.inst.getTotalLength()})),i(this,"getPointAtLength",(function(t){return n.inst.getPointAtLength(t)})),i(this,"getTangentAtLength",(function(t){return n.inst.getTangentAtLength(t)})),i(this,"getPropertiesAtLength",(function(t){return n.inst.getPropertiesAtLength(t)})),i(this,"getParts",(function(){return n.inst.getParts()})),this.inst=new O(t),!(this instanceof E))return new E(t)}));t.svgPathProperties=E})); diff --git a/custom_nodes/ComfyUI-KJNodes-main/nodes/audioscheduler_nodes.py b/custom_nodes/ComfyUI-KJNodes-main/nodes/audioscheduler_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..69d0422e7da875298f87fe60a7f6d1494530dca2 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/nodes/audioscheduler_nodes.py @@ -0,0 +1,251 @@ +# to be used with https://github.com/a1lazydog/ComfyUI-AudioScheduler +import torch +from torchvision.transforms import functional as TF +from PIL import Image, ImageDraw +import numpy as np +from ..utility.utility import pil2tensor +from nodes import MAX_RESOLUTION + +class NormalizedAmplitudeToMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "normalized_amp": ("NORMALIZED_AMPLITUDE",), + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_offset": ("INT", {"default": 0,"min": -255, "max": 255, "step": 1}), + "location_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "location_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "size": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + "shape": ( + [ + 'none', + 'circle', + 'square', 
+ 'triangle', + ], + { + "default": 'none' + }), + "color": ( + [ + 'white', + 'amplitude', + ], + { + "default": 'amplitude' + }), + },} + + CATEGORY = "KJNodes/audio" + RETURN_TYPES = ("MASK",) + FUNCTION = "convert" + DESCRIPTION = """ +Works as a bridge to the AudioScheduler -nodes: +https://github.com/a1lazydog/ComfyUI-AudioScheduler +Creates masks based on the normalized amplitude. +""" + + def convert(self, normalized_amp, width, height, frame_offset, shape, location_x, location_y, size, color): + # Ensure normalized_amp is an array and within the range [0, 1] + normalized_amp = np.clip(normalized_amp, 0.0, 1.0) + + # Offset the amplitude values by rolling the array + normalized_amp = np.roll(normalized_amp, frame_offset) + + # Initialize an empty list to hold the image tensors + out = [] + # Iterate over each amplitude value to create an image + for amp in normalized_amp: + # Scale the amplitude value to cover the full range of grayscale values + if color == 'amplitude': + grayscale_value = int(amp * 255) + elif color == 'white': + grayscale_value = 255 + # Convert the grayscale value to an RGB format + gray_color = (grayscale_value, grayscale_value, grayscale_value) + finalsize = size * amp + + if shape == 'none': + shapeimage = Image.new("RGB", (width, height), gray_color) + else: + shapeimage = Image.new("RGB", (width, height), "black") + + draw = ImageDraw.Draw(shapeimage) + if shape == 'circle' or shape == 'square': + # Define the bounding box for the shape + left_up_point = (location_x - finalsize, location_y - finalsize) + right_down_point = (location_x + finalsize,location_y + finalsize) + two_points = [left_up_point, right_down_point] + + if shape == 'circle': + draw.ellipse(two_points, fill=gray_color) + elif shape == 'square': + draw.rectangle(two_points, fill=gray_color) + + elif shape == 'triangle': + # Define the points for the triangle + left_up_point = (location_x - finalsize, location_y + finalsize) # bottom left + right_down_point = (location_x + finalsize, location_y + finalsize) # bottom right + top_point = (location_x, location_y) # top point + draw.polygon([top_point, left_up_point, right_down_point], fill=gray_color) + + shapeimage = pil2tensor(shapeimage) + mask = shapeimage[:, :, :, 0] + out.append(mask) + + return (torch.cat(out, dim=0),) + +class NormalizedAmplitudeToFloatList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "normalized_amp": ("NORMALIZED_AMPLITUDE",), + },} + + CATEGORY = "KJNodes/audio" + RETURN_TYPES = ("FLOAT",) + FUNCTION = "convert" + DESCRIPTION = """ +Works as a bridge to the AudioScheduler -nodes: +https://github.com/a1lazydog/ComfyUI-AudioScheduler +Creates a list of floats from the normalized amplitude. 
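+For example, a normalized amplitude array of [0.2, 0.7, 1.5] is clipped to the
+0..1 range and returned as the plain float list [0.2, 0.7, 1.0].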
+""" + + def convert(self, normalized_amp): + # Ensure normalized_amp is an array and within the range [0, 1] + normalized_amp = np.clip(normalized_amp, 0.0, 1.0) + return (normalized_amp.tolist(),) + +class OffsetMaskByNormalizedAmplitude: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "normalized_amp": ("NORMALIZED_AMPLITUDE",), + "mask": ("MASK",), + "x": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "y": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "rotate": ("BOOLEAN", { "default": False }), + "angle_multiplier": ("FLOAT", { "default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number" }), + } + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("mask",) + FUNCTION = "offset" + CATEGORY = "KJNodes/audio" + DESCRIPTION = """ +Works as a bridge to the AudioScheduler -nodes: +https://github.com/a1lazydog/ComfyUI-AudioScheduler +Offsets masks based on the normalized amplitude. +""" + + def offset(self, mask, x, y, angle_multiplier, rotate, normalized_amp): + + # Ensure normalized_amp is an array and within the range [0, 1] + offsetmask = mask.clone() + normalized_amp = np.clip(normalized_amp, 0.0, 1.0) + + batch_size, height, width = mask.shape + + if rotate: + for i in range(batch_size): + rotation_amp = int(normalized_amp[i] * (360 * angle_multiplier)) + rotation_angle = rotation_amp + offsetmask[i] = TF.rotate(offsetmask[i].unsqueeze(0), rotation_angle).squeeze(0) + if x != 0 or y != 0: + for i in range(batch_size): + offset_amp = normalized_amp[i] * 10 + shift_x = min(x*offset_amp, width-1) + shift_y = min(y*offset_amp, height-1) + if shift_x != 0: + offsetmask[i] = torch.roll(offsetmask[i], shifts=int(shift_x), dims=1) + if shift_y != 0: + offsetmask[i] = torch.roll(offsetmask[i], shifts=int(shift_y), dims=0) + + return offsetmask, + +class ImageTransformByNormalizedAmplitude: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "normalized_amp": ("NORMALIZED_AMPLITUDE",), + "zoom_scale": ("FLOAT", { "default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number" }), + "x_offset": ("INT", { "default": 0, "min": (1 -MAX_RESOLUTION), "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "y_offset": ("INT", { "default": 0, "min": (1 -MAX_RESOLUTION), "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "cumulative": ("BOOLEAN", { "default": False }), + "image": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "amptransform" + CATEGORY = "KJNodes/audio" + DESCRIPTION = """ +Works as a bridge to the AudioScheduler -nodes: +https://github.com/a1lazydog/ComfyUI-AudioScheduler +Transforms image based on the normalized amplitude. 
+""" + + def amptransform(self, image, normalized_amp, zoom_scale, cumulative, x_offset, y_offset): + # Ensure normalized_amp is an array and within the range [0, 1] + normalized_amp = np.clip(normalized_amp, 0.0, 1.0) + transformed_images = [] + + # Initialize the cumulative zoom factor + prev_amp = 0.0 + + for i in range(image.shape[0]): + img = image[i] # Get the i-th image in the batch + amp = normalized_amp[i] # Get the corresponding amplitude value + + # Incrementally increase the cumulative zoom factor + if cumulative: + prev_amp += amp + amp += prev_amp + + # Convert the image tensor from BxHxWxC to CxHxW format expected by torchvision + img = img.permute(2, 0, 1) + + # Convert PyTorch tensor to PIL Image for processing + pil_img = TF.to_pil_image(img) + + # Calculate the crop size based on the amplitude + width, height = pil_img.size + crop_size = int(min(width, height) * (1 - amp * zoom_scale)) + crop_size = max(crop_size, 1) + + # Calculate the crop box coordinates (centered crop) + left = (width - crop_size) // 2 + top = (height - crop_size) // 2 + right = (width + crop_size) // 2 + bottom = (height + crop_size) // 2 + + # Crop and resize back to original size + cropped_img = TF.crop(pil_img, top, left, crop_size, crop_size) + resized_img = TF.resize(cropped_img, (height, width)) + + # Convert back to tensor in CxHxW format + tensor_img = TF.to_tensor(resized_img) + + # Convert the tensor back to BxHxWxC format + tensor_img = tensor_img.permute(1, 2, 0) + + # Offset the image based on the amplitude + offset_amp = amp * 10 # Calculate the offset magnitude based on the amplitude + shift_x = min(x_offset * offset_amp, img.shape[1] - 1) # Calculate the shift in x direction + shift_y = min(y_offset * offset_amp, img.shape[0] - 1) # Calculate the shift in y direction + + # Apply the offset to the image tensor + if shift_x != 0: + tensor_img = torch.roll(tensor_img, shifts=int(shift_x), dims=1) + if shift_y != 0: + tensor_img = torch.roll(tensor_img, shifts=int(shift_y), dims=0) + + # Add to the list + transformed_images.append(tensor_img) + + # Stack all transformed images into a batch + transformed_batch = torch.stack(transformed_images) + + return (transformed_batch,) \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/nodes/batchcrop_nodes.py b/custom_nodes/ComfyUI-KJNodes-main/nodes/batchcrop_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..61e7446f7567f74421d4b05742cc3a340fec73c9 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/nodes/batchcrop_nodes.py @@ -0,0 +1,757 @@ +from ..utility.utility import tensor2pil, pil2tensor +from PIL import Image, ImageDraw, ImageFilter +import numpy as np +import torch +from torchvision.transforms import Resize, CenterCrop, InterpolationMode +import math + +#based on nodes from mtb https://github.com/melMass/comfy_mtb + +def bbox_to_region(bbox, target_size=None): + bbox = bbox_check(bbox, target_size) + return (bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]) + +def bbox_check(bbox, target_size=None): + if not target_size: + return bbox + + new_bbox = ( + bbox[0], + bbox[1], + min(target_size[0] - bbox[0], bbox[2]), + min(target_size[1] - bbox[1], bbox[3]), + ) + return new_bbox + +class BatchCropFromMask: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "original_images": ("IMAGE",), + "masks": ("MASK",), + "crop_size_mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}), + "bbox_smooth_alpha": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 
1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ( + "IMAGE", + "IMAGE", + "BBOX", + "INT", + "INT", + ) + RETURN_NAMES = ( + "original_images", + "cropped_images", + "bboxes", + "width", + "height", + ) + FUNCTION = "crop" + CATEGORY = "KJNodes/masking" + + def smooth_bbox_size(self, prev_bbox_size, curr_bbox_size, alpha): + if alpha == 0: + return prev_bbox_size + return round(alpha * curr_bbox_size + (1 - alpha) * prev_bbox_size) + + def smooth_center(self, prev_center, curr_center, alpha=0.5): + if alpha == 0: + return prev_center + return ( + round(alpha * curr_center[0] + (1 - alpha) * prev_center[0]), + round(alpha * curr_center[1] + (1 - alpha) * prev_center[1]) + ) + + def crop(self, masks, original_images, crop_size_mult, bbox_smooth_alpha): + + bounding_boxes = [] + cropped_images = [] + + self.max_bbox_width = 0 + self.max_bbox_height = 0 + + # First, calculate the maximum bounding box size across all masks + curr_max_bbox_width = 0 + curr_max_bbox_height = 0 + for mask in masks: + _mask = tensor2pil(mask)[0] + non_zero_indices = np.nonzero(np.array(_mask)) + min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) + min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) + width = max_x - min_x + height = max_y - min_y + curr_max_bbox_width = max(curr_max_bbox_width, width) + curr_max_bbox_height = max(curr_max_bbox_height, height) + + # Smooth the changes in the bounding box size + self.max_bbox_width = self.smooth_bbox_size(self.max_bbox_width, curr_max_bbox_width, bbox_smooth_alpha) + self.max_bbox_height = self.smooth_bbox_size(self.max_bbox_height, curr_max_bbox_height, bbox_smooth_alpha) + + # Apply the crop size multiplier + self.max_bbox_width = round(self.max_bbox_width * crop_size_mult) + self.max_bbox_height = round(self.max_bbox_height * crop_size_mult) + bbox_aspect_ratio = self.max_bbox_width / self.max_bbox_height + + # Then, for each mask and corresponding image... 
+ for i, (mask, img) in enumerate(zip(masks, original_images)): + _mask = tensor2pil(mask)[0] + non_zero_indices = np.nonzero(np.array(_mask)) + min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) + min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) + + # Calculate center of bounding box + center_x = np.mean(non_zero_indices[1]) + center_y = np.mean(non_zero_indices[0]) + curr_center = (round(center_x), round(center_y)) + + # If this is the first frame, initialize prev_center with curr_center + if not hasattr(self, 'prev_center'): + self.prev_center = curr_center + + # Smooth the changes in the center coordinates from the second frame onwards + if i > 0: + center = self.smooth_center(self.prev_center, curr_center, bbox_smooth_alpha) + else: + center = curr_center + + # Update prev_center for the next frame + self.prev_center = center + + # Create bounding box using max_bbox_width and max_bbox_height + half_box_width = round(self.max_bbox_width / 2) + half_box_height = round(self.max_bbox_height / 2) + min_x = max(0, center[0] - half_box_width) + max_x = min(img.shape[1], center[0] + half_box_width) + min_y = max(0, center[1] - half_box_height) + max_y = min(img.shape[0], center[1] + half_box_height) + + # Append bounding box coordinates + bounding_boxes.append((min_x, min_y, max_x - min_x, max_y - min_y)) + + # Crop the image from the bounding box + cropped_img = img[min_y:max_y, min_x:max_x, :] + + # Calculate the new dimensions while maintaining the aspect ratio + new_height = min(cropped_img.shape[0], self.max_bbox_height) + new_width = round(new_height * bbox_aspect_ratio) + + # Resize the image + resize_transform = Resize((new_height, new_width)) + resized_img = resize_transform(cropped_img.permute(2, 0, 1)) + + # Perform the center crop to the desired size + crop_transform = CenterCrop((self.max_bbox_height, self.max_bbox_width)) # swap the order here if necessary + cropped_resized_img = crop_transform(resized_img) + + cropped_images.append(cropped_resized_img.permute(1, 2, 0)) + + cropped_out = torch.stack(cropped_images, dim=0) + + return (original_images, cropped_out, bounding_boxes, self.max_bbox_width, self.max_bbox_height, ) + +class BatchUncrop: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "original_images": ("IMAGE",), + "cropped_images": ("IMAGE",), + "bboxes": ("BBOX",), + "border_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}, ), + "crop_rescale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "border_top": ("BOOLEAN", {"default": True}), + "border_bottom": ("BOOLEAN", {"default": True}), + "border_left": ("BOOLEAN", {"default": True}), + "border_right": ("BOOLEAN", {"default": True}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "uncrop" + + CATEGORY = "KJNodes/masking" + + def uncrop(self, original_images, cropped_images, bboxes, border_blending, crop_rescale, border_top, border_bottom, border_left, border_right): + def inset_border(image, border_width, border_color, border_top, border_bottom, border_left, border_right): + draw = ImageDraw.Draw(image) + width, height = image.size + if border_top: + draw.rectangle((0, 0, width, border_width), fill=border_color) + if border_bottom: + draw.rectangle((0, height - border_width, width, height), fill=border_color) + if border_left: + draw.rectangle((0, 0, border_width, height), fill=border_color) + if border_right: + draw.rectangle((width - border_width, 0, width, height), fill=border_color) + return 
image + + if len(original_images) != len(cropped_images): + raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same") + + # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images + if len(bboxes) > len(original_images): + print(f"Warning: Dropping excess bounding boxes. Expected {len(original_images)}, but got {len(bboxes)}") + bboxes = bboxes[:len(original_images)] + elif len(bboxes) < len(original_images): + raise ValueError("There should be at least as many bboxes as there are original and cropped images") + + input_images = tensor2pil(original_images) + crop_imgs = tensor2pil(cropped_images) + + out_images = [] + for i in range(len(input_images)): + img = input_images[i] + crop = crop_imgs[i] + bbox = bboxes[i] + + # uncrop the image based on the bounding box + bb_x, bb_y, bb_width, bb_height = bbox + + paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size) + + # scale factors + scale_x = crop_rescale + scale_y = crop_rescale + + # scaled paste_region + paste_region = (round(paste_region[0]*scale_x), round(paste_region[1]*scale_y), round(paste_region[2]*scale_x), round(paste_region[3]*scale_y)) + + # rescale the crop image to fit the paste_region + crop = crop.resize((round(paste_region[2]-paste_region[0]), round(paste_region[3]-paste_region[1]))) + crop_img = crop.convert("RGB") + + if border_blending > 1.0: + border_blending = 1.0 + elif border_blending < 0.0: + border_blending = 0.0 + + blend_ratio = (max(crop_img.size) / 2) * float(border_blending) + + blend = img.convert("RGBA") + mask = Image.new("L", img.size, 0) + + mask_block = Image.new("L", (paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]), 255) + mask_block = inset_border(mask_block, round(blend_ratio / 2), (0), border_top, border_bottom, border_left, border_right) + + mask.paste(mask_block, paste_region) + blend.paste(crop_img, paste_region) + + mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4)) + mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4)) + + blend.putalpha(mask) + img = Image.alpha_composite(img.convert("RGBA"), blend) + out_images.append(img.convert("RGB")) + + return (pil2tensor(out_images),) + +class BatchCropFromMaskAdvanced: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "original_images": ("IMAGE",), + "masks": ("MASK",), + "crop_size_mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "bbox_smooth_alpha": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ( + "IMAGE", + "IMAGE", + "MASK", + "IMAGE", + "MASK", + "BBOX", + "BBOX", + "INT", + "INT", + ) + RETURN_NAMES = ( + "original_images", + "cropped_images", + "cropped_masks", + "combined_crop_image", + "combined_crop_masks", + "bboxes", + "combined_bounding_box", + "bbox_width", + "bbox_height", + ) + FUNCTION = "crop" + CATEGORY = "KJNodes/masking" + + def smooth_bbox_size(self, prev_bbox_size, curr_bbox_size, alpha): + return round(alpha * curr_bbox_size + (1 - alpha) * prev_bbox_size) + + def smooth_center(self, prev_center, curr_center, alpha=0.5): + return (round(alpha * curr_center[0] + (1 - alpha) * prev_center[0]), + round(alpha * curr_center[1] + (1 - alpha) * prev_center[1])) + + def crop(self, masks, original_images, crop_size_mult, bbox_smooth_alpha): + bounding_boxes = [] + combined_bounding_box = [] + cropped_images = [] + cropped_masks = [] + cropped_masks_out = [] + 
combined_crop_out = [] + combined_cropped_images = [] + combined_cropped_masks = [] + + def calculate_bbox(mask): + non_zero_indices = np.nonzero(np.array(mask)) + + # handle empty masks + min_x, max_x, min_y, max_y = 0, 0, 0, 0 + if len(non_zero_indices[1]) > 0 and len(non_zero_indices[0]) > 0: + min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) + min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) + + width = max_x - min_x + height = max_y - min_y + bbox_size = max(width, height) + return min_x, max_x, min_y, max_y, bbox_size + + combined_mask = torch.max(masks, dim=0)[0] + _mask = tensor2pil(combined_mask)[0] + new_min_x, new_max_x, new_min_y, new_max_y, combined_bbox_size = calculate_bbox(_mask) + center_x = (new_min_x + new_max_x) / 2 + center_y = (new_min_y + new_max_y) / 2 + half_box_size = round(combined_bbox_size // 2) + new_min_x = max(0, round(center_x - half_box_size)) + new_max_x = min(original_images[0].shape[1], round(center_x + half_box_size)) + new_min_y = max(0, round(center_y - half_box_size)) + new_max_y = min(original_images[0].shape[0], round(center_y + half_box_size)) + + combined_bounding_box.append((new_min_x, new_min_y, new_max_x - new_min_x, new_max_y - new_min_y)) + + self.max_bbox_size = 0 + + # First, calculate the maximum bounding box size across all masks + curr_max_bbox_size = max(calculate_bbox(tensor2pil(mask)[0])[-1] for mask in masks) + # Smooth the changes in the bounding box size + self.max_bbox_size = self.smooth_bbox_size(self.max_bbox_size, curr_max_bbox_size, bbox_smooth_alpha) + # Apply the crop size multiplier + self.max_bbox_size = round(self.max_bbox_size * crop_size_mult) + # Make sure max_bbox_size is divisible by 16, if not, round it upwards so it is + self.max_bbox_size = math.ceil(self.max_bbox_size / 16) * 16 + + if self.max_bbox_size > original_images[0].shape[0] or self.max_bbox_size > original_images[0].shape[1]: + # max_bbox_size can only be as big as our input's width or height, and it has to be even + self.max_bbox_size = math.floor(min(original_images[0].shape[0], original_images[0].shape[1]) / 2) * 2 + + # Then, for each mask and corresponding image... 
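+        # ...compute the mask centroid, smooth it as above, and crop a square of
+        # max_bbox_size x max_bbox_size around it; frames with an empty mask fall
+        # back to the full, uncropped image.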
+ for i, (mask, img) in enumerate(zip(masks, original_images)): + _mask = tensor2pil(mask)[0] + non_zero_indices = np.nonzero(np.array(_mask)) + + # check for empty masks + if len(non_zero_indices[0]) > 0 and len(non_zero_indices[1]) > 0: + min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) + min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) + + # Calculate center of bounding box + center_x = np.mean(non_zero_indices[1]) + center_y = np.mean(non_zero_indices[0]) + curr_center = (round(center_x), round(center_y)) + + # If this is the first frame, initialize prev_center with curr_center + if not hasattr(self, 'prev_center'): + self.prev_center = curr_center + + # Smooth the changes in the center coordinates from the second frame onwards + if i > 0: + center = self.smooth_center(self.prev_center, curr_center, bbox_smooth_alpha) + else: + center = curr_center + + # Update prev_center for the next frame + self.prev_center = center + + # Create bounding box using max_bbox_size + half_box_size = self.max_bbox_size // 2 + min_x = max(0, center[0] - half_box_size) + max_x = min(img.shape[1], center[0] + half_box_size) + min_y = max(0, center[1] - half_box_size) + max_y = min(img.shape[0], center[1] + half_box_size) + + # Append bounding box coordinates + bounding_boxes.append((min_x, min_y, max_x - min_x, max_y - min_y)) + + # Crop the image from the bounding box + cropped_img = img[min_y:max_y, min_x:max_x, :] + cropped_mask = mask[min_y:max_y, min_x:max_x] + + # Resize the cropped image to a fixed size + new_size = max(cropped_img.shape[0], cropped_img.shape[1]) + resize_transform = Resize(new_size, interpolation=InterpolationMode.NEAREST, max_size=max(img.shape[0], img.shape[1])) + resized_mask = resize_transform(cropped_mask.unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0) + resized_img = resize_transform(cropped_img.permute(2, 0, 1)) + # Perform the center crop to the desired size + # Constrain the crop to the smaller of our bbox or our image so we don't expand past the image dimensions. 
+ crop_transform = CenterCrop((min(self.max_bbox_size, resized_img.shape[1]), min(self.max_bbox_size, resized_img.shape[2]))) + + cropped_resized_img = crop_transform(resized_img) + cropped_images.append(cropped_resized_img.permute(1, 2, 0)) + + cropped_resized_mask = crop_transform(resized_mask) + cropped_masks.append(cropped_resized_mask) + + combined_cropped_img = original_images[i][new_min_y:new_max_y, new_min_x:new_max_x, :] + combined_cropped_images.append(combined_cropped_img) + + combined_cropped_mask = masks[i][new_min_y:new_max_y, new_min_x:new_max_x] + combined_cropped_masks.append(combined_cropped_mask) + else: + bounding_boxes.append((0, 0, img.shape[1], img.shape[0])) + cropped_images.append(img) + cropped_masks.append(mask) + combined_cropped_images.append(img) + combined_cropped_masks.append(mask) + + cropped_out = torch.stack(cropped_images, dim=0) + combined_crop_out = torch.stack(combined_cropped_images, dim=0) + cropped_masks_out = torch.stack(cropped_masks, dim=0) + combined_crop_mask_out = torch.stack(combined_cropped_masks, dim=0) + + return (original_images, cropped_out, cropped_masks_out, combined_crop_out, combined_crop_mask_out, bounding_boxes, combined_bounding_box, self.max_bbox_size, self.max_bbox_size) + +class FilterZeroMasksAndCorrespondingImages: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + }, + "optional": { + "original_images": ("IMAGE",), + }, + } + + RETURN_TYPES = ("MASK", "IMAGE", "IMAGE", "INDEXES",) + RETURN_NAMES = ("non_zero_masks_out", "non_zero_mask_images_out", "zero_mask_images_out", "zero_mask_images_out_indexes",) + FUNCTION = "filter" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Filter out all the empty (i.e. all zero) mask in masks +Also filter out all the corresponding images in original_images by indexes if provide + +original_images (optional): If provided, need have same length as masks. 
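+Outputs the non-zero masks, their matching images, the images whose masks were
+all zero, and the original indexes of those zero-mask frames, which can be fed
+to the InsertImageBatchByIndexes node to restore the original order later.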
+""" + + def filter(self, masks, original_images=None): + non_zero_masks = [] + non_zero_mask_images = [] + zero_mask_images = [] + zero_mask_images_indexes = [] + + masks_num = len(masks) + also_process_images = False + if original_images is not None: + imgs_num = len(original_images) + if len(original_images) == masks_num: + also_process_images = True + else: + print(f"[WARNING] ignore input: original_images, due to number of original_images ({imgs_num}) is not equal to number of masks ({masks_num})") + + for i in range(masks_num): + non_zero_num = np.count_nonzero(np.array(masks[i])) + if non_zero_num > 0: + non_zero_masks.append(masks[i]) + if also_process_images: + non_zero_mask_images.append(original_images[i]) + else: + zero_mask_images.append(original_images[i]) + zero_mask_images_indexes.append(i) + + non_zero_masks_out = torch.stack(non_zero_masks, dim=0) + non_zero_mask_images_out = zero_mask_images_out = zero_mask_images_out_indexes = None + + if also_process_images: + non_zero_mask_images_out = torch.stack(non_zero_mask_images, dim=0) + if len(zero_mask_images) > 0: + zero_mask_images_out = torch.stack(zero_mask_images, dim=0) + zero_mask_images_out_indexes = zero_mask_images_indexes + + return (non_zero_masks_out, non_zero_mask_images_out, zero_mask_images_out, zero_mask_images_out_indexes) + +class InsertImageBatchByIndexes: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "images_to_insert": ("IMAGE",), + "insert_indexes": ("INDEXES",), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("images_after_insert", ) + FUNCTION = "insert" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +This node is designed to be use with node FilterZeroMasksAndCorrespondingImages +It inserts the images_to_insert into images according to insert_indexes + +Returns: + images_after_insert: updated original images with origonal sequence order +""" + + def insert(self, images, images_to_insert, insert_indexes): + images_after_insert = images + + if images_to_insert is not None and insert_indexes is not None: + images_to_insert_num = len(images_to_insert) + insert_indexes_num = len(insert_indexes) + if images_to_insert_num == insert_indexes_num: + images_after_insert = [] + + i_images = 0 + for i in range(len(images) + images_to_insert_num): + if i in insert_indexes: + images_after_insert.append(images_to_insert[insert_indexes.index(i)]) + else: + images_after_insert.append(images[i_images]) + i_images += 1 + + images_after_insert = torch.stack(images_after_insert, dim=0) + + else: + print(f"[WARNING] skip this node, due to number of images_to_insert ({images_to_insert_num}) is not equal to number of insert_indexes ({insert_indexes_num})") + + + return (images_after_insert, ) + +class BatchUncropAdvanced: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "original_images": ("IMAGE",), + "cropped_images": ("IMAGE",), + "cropped_masks": ("MASK",), + "combined_crop_mask": ("MASK",), + "bboxes": ("BBOX",), + "border_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}, ), + "crop_rescale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "use_combined_mask": ("BOOLEAN", {"default": False}), + "use_square_mask": ("BOOLEAN", {"default": True}), + }, + "optional": { + "combined_bounding_box": ("BBOX", {"default": None}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "uncrop" + CATEGORY = "KJNodes/masking" + + + def uncrop(self, original_images, cropped_images, cropped_masks, 
combined_crop_mask, bboxes, border_blending, crop_rescale, use_combined_mask, use_square_mask, combined_bounding_box = None): + + def inset_border(image, border_width=20, border_color=(0)): + width, height = image.size + bordered_image = Image.new(image.mode, (width, height), border_color) + bordered_image.paste(image, (0, 0)) + draw = ImageDraw.Draw(bordered_image) + draw.rectangle((0, 0, width - 1, height - 1), outline=border_color, width=border_width) + return bordered_image + + if len(original_images) != len(cropped_images): + raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same") + + # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images + if len(bboxes) > len(original_images): + print(f"Warning: Dropping excess bounding boxes. Expected {len(original_images)}, but got {len(bboxes)}") + bboxes = bboxes[:len(original_images)] + elif len(bboxes) < len(original_images): + raise ValueError("There should be at least as many bboxes as there are original and cropped images") + + crop_imgs = tensor2pil(cropped_images) + input_images = tensor2pil(original_images) + out_images = [] + + for i in range(len(input_images)): + img = input_images[i] + crop = crop_imgs[i] + bbox = bboxes[i] + + if use_combined_mask: + bb_x, bb_y, bb_width, bb_height = combined_bounding_box[0] + paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size) + mask = combined_crop_mask[i] + else: + bb_x, bb_y, bb_width, bb_height = bbox + paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size) + mask = cropped_masks[i] + + # scale paste_region + scale_x = scale_y = crop_rescale + paste_region = (round(paste_region[0]*scale_x), round(paste_region[1]*scale_y), round(paste_region[2]*scale_x), round(paste_region[3]*scale_y)) + + # rescale the crop image to fit the paste_region + crop = crop.resize((round(paste_region[2]-paste_region[0]), round(paste_region[3]-paste_region[1]))) + crop_img = crop.convert("RGB") + + #border blending + if border_blending > 1.0: + border_blending = 1.0 + elif border_blending < 0.0: + border_blending = 0.0 + + blend_ratio = (max(crop_img.size) / 2) * float(border_blending) + blend = img.convert("RGBA") + + if use_square_mask: + mask = Image.new("L", img.size, 0) + mask_block = Image.new("L", (paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]), 255) + mask_block = inset_border(mask_block, round(blend_ratio / 2), (0)) + mask.paste(mask_block, paste_region) + else: + original_mask = tensor2pil(mask)[0] + original_mask = original_mask.resize((paste_region[2]-paste_region[0], paste_region[3]-paste_region[1])) + mask = Image.new("L", img.size, 0) + mask.paste(original_mask, paste_region) + + mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4)) + mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4)) + + blend.paste(crop_img, paste_region) + blend.putalpha(mask) + + img = Image.alpha_composite(img.convert("RGBA"), blend) + out_images.append(img.convert("RGB")) + + return (pil2tensor(out_images),) + +class SplitBboxes: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "bboxes": ("BBOX",), + "index": ("INT", {"default": 0,"min": 0, "max": 99999999, "step": 1}), + }, + } + + RETURN_TYPES = ("BBOX","BBOX",) + RETURN_NAMES = ("bboxes_a","bboxes_b",) + FUNCTION = "splitbbox" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Splits the specified bbox list at the given index into two lists. 
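+For example, with index 2 a list of five bboxes is split into bboxes_a holding
+the first two entries and bboxes_b holding the remaining three.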
+""" + + def splitbbox(self, bboxes, index): + bboxes_a = bboxes[:index] # Sub-list from the start of bboxes up to (but not including) the index + bboxes_b = bboxes[index:] # Sub-list from the index to the end of bboxes + + return (bboxes_a, bboxes_b,) + +class BboxToInt: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "bboxes": ("BBOX",), + "index": ("INT", {"default": 0,"min": 0, "max": 99999999, "step": 1}), + }, + } + + RETURN_TYPES = ("INT","INT","INT","INT","INT","INT",) + RETURN_NAMES = ("x_min","y_min","width","height", "center_x","center_y",) + FUNCTION = "bboxtoint" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Returns selected index from bounding box list as integers. +""" + def bboxtoint(self, bboxes, index): + x_min, y_min, width, height = bboxes[index] + center_x = int(x_min + width / 2) + center_y = int(y_min + height / 2) + + return (x_min, y_min, width, height, center_x, center_y,) + +class BboxVisualize: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "bboxes": ("BBOX",), + "line_width": ("INT", {"default": 1,"min": 1, "max": 10, "step": 1}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "visualizebbox" + DESCRIPTION = """ +Visualizes the specified bbox on the image. +""" + + CATEGORY = "KJNodes/masking" + + def visualizebbox(self, bboxes, images, line_width): + image_list = [] + for image, bbox in zip(images, bboxes): + x_min, y_min, width, height = bbox + + # Ensure bbox coordinates are integers + x_min = int(x_min) + y_min = int(y_min) + width = int(width) + height = int(height) + + # Permute the image dimensions + image = image.permute(2, 0, 1) + + # Clone the image to draw bounding boxes + img_with_bbox = image.clone() + + # Define the color for the bbox, e.g., red + color = torch.tensor([1, 0, 0], dtype=torch.float32) + + # Ensure color tensor matches the image channels + if color.shape[0] != img_with_bbox.shape[0]: + color = color.unsqueeze(1).expand(-1, line_width) + + # Draw lines for each side of the bbox with the specified line width + for lw in range(line_width): + # Top horizontal line + if y_min + lw < img_with_bbox.shape[1]: + img_with_bbox[:, y_min + lw, x_min:x_min + width] = color[:, None] + + # Bottom horizontal line + if y_min + height - lw < img_with_bbox.shape[1]: + img_with_bbox[:, y_min + height - lw, x_min:x_min + width] = color[:, None] + + # Left vertical line + if x_min + lw < img_with_bbox.shape[2]: + img_with_bbox[:, y_min:y_min + height, x_min + lw] = color[:, None] + + # Right vertical line + if x_min + width - lw < img_with_bbox.shape[2]: + img_with_bbox[:, y_min:y_min + height, x_min + width - lw] = color[:, None] + + # Permute the image dimensions back + img_with_bbox = img_with_bbox.permute(1, 2, 0).unsqueeze(0) + image_list.append(img_with_bbox) + + return (torch.cat(image_list, dim=0),) + + return (torch.cat(image_list, dim=0),) \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/nodes/curve_nodes.py b/custom_nodes/ComfyUI-KJNodes-main/nodes/curve_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..8552d0053a653bffe8cf8a9230b4a6529485daf8 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/nodes/curve_nodes.py @@ -0,0 +1,1561 @@ +import torch +from torchvision import transforms +import json +from PIL import Image, ImageDraw, ImageFont, ImageColor, ImageFilter, ImageChops +import numpy as np +from ..utility.utility import pil2tensor, tensor2pil +import folder_paths +import io 
+import base64 + +from comfy.utils import common_upscale + +def plot_coordinates_to_tensor(coordinates, height, width, bbox_height, bbox_width, size_multiplier, prompt): + import matplotlib + matplotlib.use('Agg') + from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas + text_color = '#999999' + bg_color = '#353535' + matplotlib.pyplot.rcParams['text.color'] = text_color + fig, ax = matplotlib.pyplot.subplots(figsize=(width/100, height/100), dpi=100) + fig.patch.set_facecolor(bg_color) + ax.set_facecolor(bg_color) + ax.grid(color=text_color, linestyle='-', linewidth=0.5) + ax.set_xlabel('x', color=text_color) + ax.set_ylabel('y', color=text_color) + for text in ax.get_xticklabels() + ax.get_yticklabels(): + text.set_color(text_color) + ax.set_title('position for: ' + prompt) + ax.set_xlabel('X Coordinate') + ax.set_ylabel('Y Coordinate') + #ax.legend().remove() + ax.set_xlim(0, width) # Set the x-axis to match the input latent width + ax.set_ylim(height, 0) # Set the y-axis to match the input latent height, with (0,0) at top-left + # Adjust the margins of the subplot + matplotlib.pyplot.subplots_adjust(left=0.08, right=0.95, bottom=0.05, top=0.95, wspace=0.2, hspace=0.2) + + cmap = matplotlib.pyplot.get_cmap('rainbow') + image_batch = [] + canvas = FigureCanvas(fig) + width, height = fig.get_size_inches() * fig.get_dpi() + # Draw a box at each coordinate + for i, ((x, y), size) in enumerate(zip(coordinates, size_multiplier)): + color_index = i / (len(coordinates) - 1) + color = cmap(color_index) + draw_height = bbox_height * size + draw_width = bbox_width * size + rect = matplotlib.patches.Rectangle((x - draw_width/2, y - draw_height/2), draw_width, draw_height, + linewidth=1, edgecolor=color, facecolor='none', alpha=0.5) + ax.add_patch(rect) + + # Check if there is a next coordinate to draw an arrow to + if i < len(coordinates) - 1: + x1, y1 = coordinates[i] + x2, y2 = coordinates[i + 1] + ax.annotate("", xy=(x2, y2), xytext=(x1, y1), + arrowprops=dict(arrowstyle="->", + linestyle="-", + lw=1, + color=color, + mutation_scale=20)) + canvas.draw() + image_np = np.frombuffer(canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3).copy() + image_tensor = torch.from_numpy(image_np).float() / 255.0 + image_tensor = image_tensor.unsqueeze(0) + image_batch.append(image_tensor) + + matplotlib.pyplot.close(fig) + image_batch_tensor = torch.cat(image_batch, dim=0) + + return image_batch_tensor + +class PlotCoordinates: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "coordinates": ("STRING", {"forceInput": True}), + "text": ("STRING", {"default": 'title', "multiline": False}), + "width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "bbox_width": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}), + "bbox_height": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}), + }, + "optional": {"size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True})}, + } + RETURN_TYPES = ("IMAGE", "INT", "INT", "INT", "INT",) + RETURN_NAMES = ("images", "width", "height", "bbox_width", "bbox_height",) + FUNCTION = "append" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +Plots coordinates to sequence of images using Matplotlib. 
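+Coordinates are parsed from a JSON-style string such as
+"[{'x': 100, 'y': 100}, {'x': 200, 'y': 150}]"; one plot frame is produced per
+coordinate, drawing a bbox_width x bbox_height box at each point with arrows
+toward the next point.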
+ +""" + + def append(self, coordinates, text, width, height, bbox_width, bbox_height, size_multiplier=[1.0]): + coordinates = json.loads(coordinates.replace("'", '"')) + coordinates = [(coord['x'], coord['y']) for coord in coordinates] + batch_size = len(coordinates) + if not size_multiplier or len(size_multiplier) != batch_size: + size_multiplier = [0] * batch_size + else: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + + plot_image_tensor = plot_coordinates_to_tensor(coordinates, height, width, bbox_height, bbox_width, size_multiplier, text) + + return (plot_image_tensor, width, height, bbox_width, bbox_height) + +class SplineEditor: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "points_store": ("STRING", {"multiline": False}), + "coordinates": ("STRING", {"multiline": False}), + "mask_width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "mask_height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "points_to_sample": ("INT", {"default": 16, "min": 2, "max": 1000, "step": 1}), + "sampling_method": ( + [ + 'path', + 'time', + 'controlpoints' + ], + { + "default": 'time' + }), + "interpolation": ( + [ + 'cardinal', + 'monotone', + 'basis', + 'linear', + 'step-before', + 'step-after', + 'polar', + 'polar-reverse', + ], + { + "default": 'cardinal' + }), + "tension": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "repeat_output": ("INT", {"default": 1, "min": 1, "max": 4096, "step": 1}), + "float_output_type": ( + [ + 'list', + 'pandas series', + 'tensor', + ], + { + "default": 'list' + }), + }, + "optional": { + "min_value": ("FLOAT", {"default": 0.0, "min": -10000.0, "max": 10000.0, "step": 0.01}), + "max_value": ("FLOAT", {"default": 1.0, "min": -10000.0, "max": 10000.0, "step": 0.01}), + "bg_image": ("IMAGE", ), + } + } + + RETURN_TYPES = ("MASK", "STRING", "FLOAT", "INT", "STRING",) + RETURN_NAMES = ("mask", "coord_str", "float", "count", "normalized_str",) + FUNCTION = "splinedata" + CATEGORY = "KJNodes/weights" + DESCRIPTION = """ +# WORK IN PROGRESS +Do not count on this as part of your workflow yet, +probably contains lots of bugs and stability is not +guaranteed!! + +## Graphical editor to create values for various +## schedules and/or mask batches. + +**Shift + click** to add control point at end. +**Ctrl + click** to add control point (subdivide) between two points. +**Right click on a point** to delete it. +Note that you can't delete from start/end. + +Right click on canvas for context menu: +These are purely visual options, doesn't affect the output: + - Toggle handles visibility + - Display sample points: display the points to be returned. + +**points_to_sample** value sets the number of samples +returned from the **drawn spline itself**, this is independent from the +actual control points, so the interpolation type matters. 
+sampling_method: + - time: samples along the time axis, used for schedules + - path: samples along the path itself, useful for coordinates + +output types: + - mask batch + example compatible nodes: anything that takes masks + - list of floats + example compatible nodes: IPAdapter weights + - pandas series + example compatible nodes: anything that takes Fizz' + nodes Batch Value Schedule + - torch tensor + example compatible nodes: unknown +""" + + def splinedata(self, mask_width, mask_height, coordinates, float_output_type, interpolation, + points_to_sample, sampling_method, points_store, tension, repeat_output, + min_value=0.0, max_value=1.0, bg_image=None): + + coordinates = json.loads(coordinates) + normalized = [] + normalized_y_values = [] + for coord in coordinates: + coord['x'] = int(round(coord['x'])) + coord['y'] = int(round(coord['y'])) + norm_x = (1.0 - (coord['x'] / mask_height) - 0.0) * (max_value - min_value) + min_value + norm_y = (1.0 - (coord['y'] / mask_height) - 0.0) * (max_value - min_value) + min_value + normalized_y_values.append(norm_y) + normalized.append({'x':norm_x, 'y':norm_y}) + if float_output_type == 'list': + out_floats = normalized_y_values * repeat_output + elif float_output_type == 'pandas series': + try: + import pandas as pd + except: + raise Exception("MaskOrImageToWeight: pandas is not installed. Please install pandas to use this output_type") + out_floats = pd.Series(normalized_y_values * repeat_output), + elif float_output_type == 'tensor': + out_floats = torch.tensor(normalized_y_values * repeat_output, dtype=torch.float32) + # Create a color map for grayscale intensities + color_map = lambda y: torch.full((mask_height, mask_width, 3), y, dtype=torch.float32) + + # Create image tensors for each normalized y value + mask_tensors = [color_map(y) for y in normalized_y_values] + masks_out = torch.stack(mask_tensors) + masks_out = masks_out.repeat(repeat_output, 1, 1, 1) + masks_out = masks_out.mean(dim=-1) + if bg_image is None: + return (masks_out, json.dumps(coordinates), out_floats, len(out_floats) , json.dumps(normalized)) + else: + transform = transforms.ToPILImage() + image = transform(bg_image[0].permute(2, 0, 1)) + buffered = io.BytesIO() + image.save(buffered, format="JPEG", quality=75) + + # Step 3: Encode the image bytes to a Base64 string + img_bytes = buffered.getvalue() + img_base64 = base64.b64encode(img_bytes).decode('utf-8') + return { + "ui": {"bg_image": [img_base64]}, + "result":(masks_out, json.dumps(coordinates), out_floats, len(out_floats) , json.dumps(normalized)) + } + + +class CreateShapeMaskOnPath: + + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "createshapemask" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Creates a mask or batch of masks with the specified shape. +Locations are center locations. 
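+Coordinates are parsed from a JSON-style list of {'x', 'y'} points and one mask
+is drawn per point; the optional size_multiplier input grows or shrinks the
+shapes over the batch.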
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "shape": ( + [ 'circle', + 'square', + 'triangle', + ], + { + "default": 'circle' + }), + "coordinates": ("STRING", {"forceInput": True}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "shape_width": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + "shape_height": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + }, + "optional": { + "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}), + } + } + + def createshapemask(self, coordinates, frame_width, frame_height, shape_width, shape_height, shape, size_multiplier=[1.0]): + # Define the number of images in the batch + coordinates = coordinates.replace("'", '"') + coordinates = json.loads(coordinates) + + batch_size = len(coordinates) + out = [] + color = "white" + if not size_multiplier or len(size_multiplier) != batch_size: + size_multiplier = [0] * batch_size + else: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + for i, coord in enumerate(coordinates): + image = Image.new("RGB", (frame_width, frame_height), "black") + draw = ImageDraw.Draw(image) + + # Calculate the size for this frame and ensure it's not less than 0 + current_width = max(0, shape_width + i * size_multiplier[i]) + current_height = max(0, shape_height + i * size_multiplier[i]) + + location_x = coord['x'] + location_y = coord['y'] + + if shape == 'circle' or shape == 'square': + # Define the bounding box for the shape + left_up_point = (location_x - current_width // 2, location_y - current_height // 2) + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) + two_points = [left_up_point, right_down_point] + + if shape == 'circle': + draw.ellipse(two_points, fill=color) + elif shape == 'square': + draw.rectangle(two_points, fill=color) + + elif shape == 'triangle': + # Define the points for the triangle + left_up_point = (location_x - current_width // 2, location_y + current_height // 2) # bottom left + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) # bottom right + top_point = (location_x, location_y - current_height // 2) # top point + draw.polygon([top_point, left_up_point, right_down_point], fill=color) + + image = pil2tensor(image) + mask = image[:, :, :, 0] + out.append(mask) + outstack = torch.cat(out, dim=0) + return (outstack, 1.0 - outstack,) + +class CreateShapeImageOnPath: + + RETURN_TYPES = ("IMAGE", "MASK",) + RETURN_NAMES = ("image","mask", ) + FUNCTION = "createshapemask" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates an image or batch of images with the specified shape. +Locations are center locations. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "shape": ( + [ 'circle', + 'square', + 'triangle', + ], + { + "default": 'circle' + }), + "coordinates": ("STRING", {"forceInput": True}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "shape_width": ("INT", {"default": 128,"min": 2, "max": 4096, "step": 1}), + "shape_height": ("INT", {"default": 128,"min": 2, "max": 4096, "step": 1}), + "shape_color": ("STRING", {"default": 'white'}), + "bg_color": ("STRING", {"default": 'black'}), + "blur_radius": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100, "step": 0.1}), + "intensity": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 100.0, "step": 0.01}), + }, + "optional": { + "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}), + "trailing": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + } + } + + def createshapemask(self, coordinates, frame_width, frame_height, shape_width, shape_height, shape_color, + bg_color, blur_radius, shape, intensity, size_multiplier=[1.0], accumulate=False, trailing=1.0): + # Define the number of images in the batch + if len(coordinates) < 10: + coords_list = [] + for coords in coordinates: + coords = json.loads(coords.replace("'", '"')) + coords_list.append(coords) + else: + coords = json.loads(coordinates.replace("'", '"')) + coords_list = [coords] + + batch_size = len(coords_list[0]) + images_list = [] + masks_list = [] + + if not size_multiplier or len(size_multiplier) != batch_size: + size_multiplier = [0] * batch_size + else: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + + previous_output = None + + for i in range(batch_size): + image = Image.new("RGB", (frame_width, frame_height), bg_color) + draw = ImageDraw.Draw(image) + + # Calculate the size for this frame and ensure it's not less than 0 + current_width = max(0, shape_width + i * size_multiplier[i]) + current_height = max(0, shape_height + i * size_multiplier[i]) + + for coords in coords_list: + location_x = coords[i]['x'] + location_y = coords[i]['y'] + + if shape == 'circle' or shape == 'square': + # Define the bounding box for the shape + left_up_point = (location_x - current_width // 2, location_y - current_height // 2) + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) + two_points = [left_up_point, right_down_point] + + if shape == 'circle': + draw.ellipse(two_points, fill=shape_color) + elif shape == 'square': + draw.rectangle(two_points, fill=shape_color) + + elif shape == 'triangle': + # Define the points for the triangle + left_up_point = (location_x - current_width // 2, location_y + current_height // 2) # bottom left + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) # bottom right + top_point = (location_x, location_y - current_height // 2) # top point + draw.polygon([top_point, left_up_point, right_down_point], fill=shape_color) + + if blur_radius != 0: + image = image.filter(ImageFilter.GaussianBlur(blur_radius)) + # Blend the current image with the accumulated image + + image = pil2tensor(image) + if trailing != 1.0 and previous_output is not None: + # Add the decayed previous output to the current frame + image += trailing * previous_output + image = image / image.max() + previous_output = image + image = image * intensity + mask = image[:, :, :, 0] + 
masks_list.append(mask) + images_list.append(image) + out_images = torch.cat(images_list, dim=0).cpu().float() + out_masks = torch.cat(masks_list, dim=0) + return (out_images, out_masks) + +class CreateTextOnPath: + + RETURN_TYPES = ("IMAGE", "MASK", "MASK",) + RETURN_NAMES = ("image", "mask", "mask_inverted",) + FUNCTION = "createtextmask" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Creates a mask or batch of masks with the specified text. +Locations are center locations. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "coordinates": ("STRING", {"forceInput": True}), + "text": ("STRING", {"default": 'text', "multiline": True}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "font_size": ("INT", {"default": 42}), + "alignment": ( + [ 'left', + 'center', + 'right' + ], + {"default": 'center'} + ), + "text_color": ("STRING", {"default": 'white'}), + }, + "optional": { + "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}), + } + } + + def createtextmask(self, coordinates, frame_width, frame_height, font, font_size, text, text_color, alignment, size_multiplier=[1.0]): + coordinates = coordinates.replace("'", '"') + coordinates = json.loads(coordinates) + + batch_size = len(coordinates) + mask_list = [] + image_list = [] + color = text_color + font_path = folder_paths.get_full_path("kjnodes_fonts", font) + + if len(size_multiplier) != batch_size: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + + for i, coord in enumerate(coordinates): + image = Image.new("RGB", (frame_width, frame_height), "black") + draw = ImageDraw.Draw(image) + lines = text.split('\n') # Split the text into lines + # Apply the size multiplier to the font size for this iteration + current_font_size = int(font_size * size_multiplier[i]) + current_font = ImageFont.truetype(font_path, current_font_size) + line_heights = [current_font.getbbox(line)[3] for line in lines] # List of line heights + total_text_height = sum(line_heights) # Total height of text block + + # Calculate the starting Y position to center the block of text + start_y = coord['y'] - total_text_height // 2 + for j, line in enumerate(lines): + text_width, text_height = current_font.getbbox(line)[2], line_heights[j] + if alignment == 'left': + location_x = coord['x'] + elif alignment == 'center': + location_x = int(coord['x'] - text_width // 2) + elif alignment == 'right': + location_x = int(coord['x'] - text_width) + + location_y = int(start_y + sum(line_heights[:j])) + text_position = (location_x, location_y) + # Draw the text + try: + draw.text(text_position, line, fill=color, font=current_font, features=['-liga']) + except: + draw.text(text_position, line, fill=color, font=current_font) + + image = pil2tensor(image) + non_black_pixels = (image > 0).any(dim=-1) + mask = non_black_pixels.to(image.dtype) + mask_list.append(mask) + image_list.append(image) + + out_images = torch.cat(image_list, dim=0).cpu().float() + out_masks = torch.cat(mask_list, dim=0) + return (out_images, out_masks, 1.0 - out_masks,) + +class CreateGradientFromCoords: + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("image", ) + FUNCTION = "generate" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates a gradient image from coordinates. 
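+The first two coordinates define the gradient axis: every pixel is projected
+onto that line and blended from start_color to end_color along it, with the
+multiplier controlling how quickly the blend progresses.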
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "coordinates": ("STRING", {"forceInput": True}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "start_color": ("STRING", {"default": 'white'}), + "end_color": ("STRING", {"default": 'black'}), + "multiplier": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 100.0, "step": 0.01}), + }, + } + + def generate(self, coordinates, frame_width, frame_height, start_color, end_color, multiplier): + # Parse the coordinates + coordinates = json.loads(coordinates.replace("'", '"')) + + # Create an image + image = Image.new("RGB", (frame_width, frame_height)) + draw = ImageDraw.Draw(image) + + # Extract start and end points for the gradient + start_coord = coordinates[0] + end_coord = coordinates[1] + + start_color = ImageColor.getrgb(start_color) + end_color = ImageColor.getrgb(end_color) + + # Calculate the gradient direction (vector) + gradient_direction = (end_coord['x'] - start_coord['x'], end_coord['y'] - start_coord['y']) + gradient_length = (gradient_direction[0] ** 2 + gradient_direction[1] ** 2) ** 0.5 + + # Iterate over each pixel in the image + for y in range(frame_height): + for x in range(frame_width): + # Calculate the projection of the point on the gradient line + point_vector = (x - start_coord['x'], y - start_coord['y']) + projection = (point_vector[0] * gradient_direction[0] + point_vector[1] * gradient_direction[1]) / gradient_length + projection = max(min(projection, gradient_length), 0) # Clamp the projection value + + # Calculate the blend factor for the current pixel + blend = projection * multiplier / gradient_length + + # Determine the color of the current pixel + color = ( + int(start_color[0] + (end_color[0] - start_color[0]) * blend), + int(start_color[1] + (end_color[1] - start_color[1]) * blend), + int(start_color[2] + (end_color[2] - start_color[2]) * blend) + ) + + # Set the pixel color + draw.point((x, y), fill=color) + + # Convert the PIL image to a tensor (assuming such a function exists in your context) + image_tensor = pil2tensor(image) + + return (image_tensor,) + +class GradientToFloat: + + RETURN_TYPES = ("FLOAT", "FLOAT",) + RETURN_NAMES = ("float_x", "float_y", ) + FUNCTION = "sample" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Calculates list of floats from image. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE", ), + "steps": ("INT", {"default": 10, "min": 2, "max": 10000, "step": 1}), + }, + } + + def sample(self, image, steps): + # Assuming image is a tensor with shape [B, H, W, C] + B, H, W, C = image.shape + + # Sample along the width axis (W) + w_intervals = torch.linspace(0, W - 1, steps=steps, dtype=torch.int64) + # Assuming we're sampling from the first batch and the first channel + w_sampled = image[0, :, w_intervals, 0] + + # Sample along the height axis (H) + h_intervals = torch.linspace(0, H - 1, steps=steps, dtype=torch.int64) + # Assuming we're sampling from the first batch and the first channel + h_sampled = image[0, h_intervals, :, 0] + + # Taking the mean across the height for width sampling, and across the width for height sampling + w_values = w_sampled.mean(dim=0).tolist() + h_values = h_sampled.mean(dim=1).tolist() + + return (w_values, h_values) + +class MaskOrImageToWeight: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "output_type": ( + [ + 'list', + 'pandas series', + 'tensor', + 'string' + ], + { + "default": 'list' + }), + }, + "optional": { + "images": ("IMAGE",), + "masks": ("MASK",), + }, + + } + RETURN_TYPES = ("FLOAT", "STRING",) + FUNCTION = "execute" + CATEGORY = "KJNodes/weights" + DESCRIPTION = """ +Gets the mean values from mask or image batch +and returns that as the selected output type. +""" + + def execute(self, output_type, images=None, masks=None): + mean_values = [] + if masks is not None and images is None: + for mask in masks: + mean_values.append(mask.mean().item()) + elif masks is None and images is not None: + for image in images: + mean_values.append(image.mean().item()) + elif masks is not None and images is not None: + raise Exception("MaskOrImageToWeight: Use either mask or image input only.") + + # Convert mean_values to the specified output_type + if output_type == 'list': + out = mean_values + elif output_type == 'pandas series': + try: + import pandas as pd + except: + raise Exception("MaskOrImageToWeight: pandas is not installed. Please install pandas to use this output_type") + out = pd.Series(mean_values), + elif output_type == 'tensor': + out = torch.tensor(mean_values, dtype=torch.float32), + return (out, [str(value) for value in mean_values],) + +class WeightScheduleConvert: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input_values": ("FLOAT", {"default": 0.0, "forceInput": True}), + "output_type": ( + [ + 'match_input', + 'list', + 'pandas series', + 'tensor', + ], + { + "default": 'list' + }), + "invert": ("BOOLEAN", {"default": False}), + "repeat": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), + }, + "optional": { + "remap_to_frames": ("INT", {"default": 0}), + "interpolation_curve": ("FLOAT", {"forceInput": True}), + "remap_values": ("BOOLEAN", {"default": False}), + "remap_min": ("FLOAT", {"default": 0.0, "min": -100000, "max": 100000.0, "step": 0.01}), + "remap_max": ("FLOAT", {"default": 1.0, "min": -100000, "max": 100000.0, "step": 0.01}), + }, + + } + RETURN_TYPES = ("FLOAT", "STRING", "INT",) + FUNCTION = "execute" + CATEGORY = "KJNodes/weights" + DESCRIPTION = """ +Converts different value lists/series to another type. 
+""" + + def detect_input_type(self, input_values): + import pandas as pd + if isinstance(input_values, list): + return 'list' + elif isinstance(input_values, pd.Series): + return 'pandas series' + elif isinstance(input_values, torch.Tensor): + return 'tensor' + else: + raise ValueError("Unsupported input type") + + def execute(self, input_values, output_type, invert, repeat, remap_to_frames=0, interpolation_curve=None, remap_min=0.0, remap_max=1.0, remap_values=False): + import pandas as pd + input_type = self.detect_input_type(input_values) + + if input_type == 'pandas series': + float_values = input_values.tolist() + elif input_type == 'tensor': + float_values = input_values + else: + float_values = input_values + + if invert: + float_values = [1 - value for value in float_values] + + if interpolation_curve is not None: + interpolated_pattern = [] + orig_float_values = float_values + for value in interpolation_curve: + min_val = min(orig_float_values) + max_val = max(orig_float_values) + # Normalize the values to [0, 1] + normalized_values = [(value - min_val) / (max_val - min_val) for value in orig_float_values] + # Interpolate the normalized values to the new frame count + remapped_float_values = np.interp(np.linspace(0, 1, int(remap_to_frames * value)), np.linspace(0, 1, len(normalized_values)), normalized_values).tolist() + interpolated_pattern.extend(remapped_float_values) + float_values = interpolated_pattern + else: + # Remap float_values to match target_frame_amount + if remap_to_frames > 0 and remap_to_frames != len(float_values): + min_val = min(float_values) + max_val = max(float_values) + # Normalize the values to [0, 1] + normalized_values = [(value - min_val) / (max_val - min_val) for value in float_values] + # Interpolate the normalized values to the new frame count + float_values = np.interp(np.linspace(0, 1, remap_to_frames), np.linspace(0, 1, len(normalized_values)), normalized_values).tolist() + + float_values = float_values * repeat + if remap_values: + float_values = self.remap_values(float_values, remap_min, remap_max) + + if output_type == 'list': + out = float_values, + elif output_type == 'pandas series': + out = pd.Series(float_values), + elif output_type == 'tensor': + if input_type == 'pandas series': + out = torch.tensor(float_values.values, dtype=torch.float32), + else: + out = torch.tensor(float_values, dtype=torch.float32), + elif output_type == 'match_input': + out = float_values, + return (out, [str(value) for value in float_values], [int(value) for value in float_values]) + + def remap_values(self, values, target_min, target_max): + # Determine the current range + current_min = min(values) + current_max = max(values) + current_range = current_max - current_min + + # Determine the target range + target_range = target_max - target_min + + # Perform the linear interpolation for each value + remapped_values = [(value - current_min) / current_range * target_range + target_min for value in values] + + return remapped_values + + +class FloatToMask: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input_values": ("FLOAT", {"forceInput": True, "default": 0}), + "width": ("INT", {"default": 100, "min": 1}), + "height": ("INT", {"default": 100, "min": 1}), + }, + } + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Generates a batch of masks based on the input float values. +The batch size is determined by the length of the input float values. 
+Each mask is generated with the specified width and height. +""" + + def execute(self, input_values, width, height): + import pandas as pd + # Ensure input_values is a list + if isinstance(input_values, (float, int)): + input_values = [input_values] + elif isinstance(input_values, pd.Series): + input_values = input_values.tolist() + elif isinstance(input_values, list) and all(isinstance(item, list) for item in input_values): + input_values = [item for sublist in input_values for item in sublist] + + # Generate a batch of masks based on the input_values + masks = [] + for value in input_values: + # Assuming value is a float between 0 and 1 representing the mask's intensity + mask = torch.ones((height, width), dtype=torch.float32) * value + masks.append(mask) + masks_out = torch.stack(masks, dim=0) + + return(masks_out,) +class WeightScheduleExtend: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input_values_1": ("FLOAT", {"default": 0.0, "forceInput": True}), + "input_values_2": ("FLOAT", {"default": 0.0, "forceInput": True}), + "output_type": ( + [ + 'match_input', + 'list', + 'pandas series', + 'tensor', + ], + { + "default": 'match_input' + }), + }, + + } + RETURN_TYPES = ("FLOAT",) + FUNCTION = "execute" + CATEGORY = "KJNodes/weights" + DESCRIPTION = """ +Extends, and converts if needed, different value lists/series +""" + + def detect_input_type(self, input_values): + import pandas as pd + if isinstance(input_values, list): + return 'list' + elif isinstance(input_values, pd.Series): + return 'pandas series' + elif isinstance(input_values, torch.Tensor): + return 'tensor' + else: + raise ValueError("Unsupported input type") + + def execute(self, input_values_1, input_values_2, output_type): + import pandas as pd + input_type_1 = self.detect_input_type(input_values_1) + input_type_2 = self.detect_input_type(input_values_2) + # Convert input_values_2 to the same format as input_values_1 if they do not match + if not input_type_1 == input_type_2: + print("Converting input_values_2 to the same format as input_values_1") + if input_type_1 == 'pandas series': + # Convert input_values_2 to a pandas Series + float_values_2 = pd.Series(input_values_2) + elif input_type_1 == 'tensor': + # Convert input_values_2 to a tensor + float_values_2 = torch.tensor(input_values_2, dtype=torch.float32) + else: + print("Input types match, no conversion needed") + # If the types match, no conversion is needed + float_values_2 = input_values_2 + + float_values = input_values_1 + float_values_2 + + if output_type == 'list': + return float_values, + elif output_type == 'pandas series': + return pd.Series(float_values), + elif output_type == 'tensor': + if input_type_1 == 'pandas series': + return torch.tensor(float_values.values, dtype=torch.float32), + else: + return torch.tensor(float_values, dtype=torch.float32), + elif output_type == 'match_input': + return float_values, + else: + raise ValueError(f"Unsupported output_type: {output_type}") + +class FloatToSigmas: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "float_list": ("FLOAT", {"default": 0.0, "forceInput": True}), + } + } + RETURN_TYPES = ("SIGMAS",) + RETURN_NAMES = ("SIGMAS",) + CATEGORY = "KJNodes/noise" + FUNCTION = "customsigmas" + DESCRIPTION = """ +Creates a sigmas tensor from list of float values. 
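+
+The conversion itself is just a tensor construction, e.g. with
+illustrative sigma values:
+
+    import torch
+    float_list = [14.61, 6.47, 3.67, 2.18, 1.28, 0.72, 0.34, 0.0]
+    sigmas = torch.tensor(float_list, dtype=torch.float32)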
+ +""" + def customsigmas(self, float_list): + return torch.tensor(float_list, dtype=torch.float32), + +class SigmasToFloat: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "sigmas": ("SIGMAS",), + } + } + RETURN_TYPES = ("FLOAT",) + RETURN_NAMES = ("float",) + CATEGORY = "KJNodes/noise" + FUNCTION = "customsigmas" + DESCRIPTION = """ +Creates a float list from sigmas tensors. + +""" + def customsigmas(self, sigmas): + return sigmas.tolist(), + +class GLIGENTextBoxApplyBatchCoords: + @classmethod + def INPUT_TYPES(s): + return {"required": {"conditioning_to": ("CONDITIONING", ), + "latents": ("LATENT", ), + "clip": ("CLIP", ), + "gligen_textbox_model": ("GLIGEN", ), + "coordinates": ("STRING", {"forceInput": True}), + "text": ("STRING", {"multiline": True}), + "width": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}), + "height": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}), + }, + "optional": {"size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True})}, + } + RETURN_TYPES = ("CONDITIONING", "IMAGE", ) + RETURN_NAMES = ("conditioning", "coord_preview", ) + FUNCTION = "append" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +This node allows scheduling GLIGEN text box positions in a batch, +to be used with AnimateDiff-Evolved. Intended to pair with the +Spline Editor -node. + +GLIGEN model can be downloaded through the Manage's "Install Models" menu. +Or directly from here: +https://huggingface.co/comfyanonymous/GLIGEN_pruned_safetensors/tree/main + +Inputs: +- **latents** input is used to calculate batch size +- **clip** is your standard text encoder, use same as for the main prompt +- **gligen_textbox_model** connects to GLIGEN Loader +- **coordinates** takes a json string of points, directly compatible +with the spline editor node. +- **text** is the part of the prompt to set position for +- **width** and **height** are the size of the GLIGEN bounding box + +Outputs: +- **conditioning** goes between to clip text encode and the sampler +- **coord_preview** is an optional preview of the coordinates and +bounding boxes. 
+ +""" + + def append(self, latents, coordinates, conditioning_to, clip, gligen_textbox_model, text, width, height, size_multiplier=[1.0]): + coordinates = json.loads(coordinates.replace("'", '"')) + coordinates = [(coord['x'], coord['y']) for coord in coordinates] + + batch_size = sum(tensor.size(0) for tensor in latents.values()) + if len(coordinates) != batch_size: + print("GLIGENTextBoxApplyBatchCoords WARNING: The number of coordinates does not match the number of latents") + + c = [] + _, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True) + + for t in conditioning_to: + n = [t[0], t[1].copy()] + + position_params_batch = [[] for _ in range(batch_size)] # Initialize a list of empty lists for each batch item + if len(size_multiplier) != batch_size: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + + for i in range(batch_size): + x_position, y_position = coordinates[i] + position_param = (cond_pooled, int((height // 8) * size_multiplier[i]), int((width // 8) * size_multiplier[i]), (y_position - height // 2) // 8, (x_position - width // 2) // 8) + position_params_batch[i].append(position_param) # Append position_param to the correct sublist + + prev = [] + if "gligen" in n[1]: + prev = n[1]['gligen'][2] + else: + prev = [[] for _ in range(batch_size)] + # Concatenate prev and position_params_batch, ensuring both are lists of lists + # and each sublist corresponds to a batch item + combined_position_params = [prev_item + batch_item for prev_item, batch_item in zip(prev, position_params_batch)] + n[1]['gligen'] = ("position_batched", gligen_textbox_model, combined_position_params) + c.append(n) + + image_height = latents['samples'].shape[-2] * 8 + image_width = latents['samples'].shape[-1] * 8 + plot_image_tensor = plot_coordinates_to_tensor(coordinates, image_height, image_width, height, width, size_multiplier, text) + + return (c, plot_image_tensor,) + +class CreateInstanceDiffusionTracking: + + RETURN_TYPES = ("TRACKING", "STRING", "INT", "INT", "INT", "INT",) + RETURN_NAMES = ("tracking", "prompt", "width", "height", "bbox_width", "bbox_height",) + FUNCTION = "tracking" + CATEGORY = "KJNodes/InstanceDiffusion" + DESCRIPTION = """ +Creates tracking data to be used with InstanceDiffusion: +https://github.com/logtd/ComfyUI-InstanceDiffusion + +InstanceDiffusion prompt format: +"class_id.class_name": "prompt", +for example: +"1.head": "((head))", +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "coordinates": ("STRING", {"forceInput": True}), + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "bbox_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "bbox_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "class_name": ("STRING", {"default": "class_name"}), + "class_id": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "prompt": ("STRING", {"default": "prompt", "multiline": True}), + }, + "optional": { + "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}), + "fit_in_frame": ("BOOLEAN", {"default": True}), + } + } + + def tracking(self, coordinates, class_name, class_id, width, height, bbox_width, bbox_height, prompt, size_multiplier=[1.0], fit_in_frame=True): + # Define the number of images in the batch + coordinates = coordinates.replace("'", '"') + coordinates = json.loads(coordinates) + + tracked 
= {} + tracked[class_name] = {} + batch_size = len(coordinates) + # Initialize a list to hold the coordinates for the current ID + id_coordinates = [] + if not size_multiplier or len(size_multiplier) != batch_size: + size_multiplier = [0] * batch_size + else: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + for i, coord in enumerate(coordinates): + x = coord['x'] + y = coord['y'] + adjusted_bbox_width = bbox_width * size_multiplier[i] + adjusted_bbox_height = bbox_height * size_multiplier[i] + # Calculate the top left and bottom right coordinates + top_left_x = x - adjusted_bbox_width // 2 + top_left_y = y - adjusted_bbox_height // 2 + bottom_right_x = x + adjusted_bbox_width // 2 + bottom_right_y = y + adjusted_bbox_height // 2 + + if fit_in_frame: + # Clip the coordinates to the frame boundaries + top_left_x = max(0, top_left_x) + top_left_y = max(0, top_left_y) + bottom_right_x = min(width, bottom_right_x) + bottom_right_y = min(height, bottom_right_y) + # Ensure width and height are positive + adjusted_bbox_width = max(1, bottom_right_x - top_left_x) + adjusted_bbox_height = max(1, bottom_right_y - top_left_y) + + # Update the coordinates with the new width and height + bottom_right_x = top_left_x + adjusted_bbox_width + bottom_right_y = top_left_y + adjusted_bbox_height + + # Append the top left and bottom right coordinates to the list for the current ID + id_coordinates.append([top_left_x, top_left_y, bottom_right_x, bottom_right_y, width, height]) + + class_id = int(class_id) + # Assign the list of coordinates to the specified ID within the class_id dictionary + tracked[class_name][class_id] = id_coordinates + + prompt_string = "" + for class_name, class_data in tracked.items(): + for class_id in class_data.keys(): + class_id_str = str(class_id) + # Use the incoming prompt for each class name and ID + prompt_string += f'"{class_id_str}.{class_name}": "({prompt})",\n' + + # Remove the last comma and newline + prompt_string = prompt_string.rstrip(",\n") + + return (tracked, prompt_string, width, height, bbox_width, bbox_height) + +class AppendInstanceDiffusionTracking: + + RETURN_TYPES = ("TRACKING", "STRING",) + RETURN_NAMES = ("tracking", "prompt",) + FUNCTION = "append" + CATEGORY = "KJNodes/InstanceDiffusion" + DESCRIPTION = """ +Appends tracking data to be used with InstanceDiffusion: +https://github.com/logtd/ComfyUI-InstanceDiffusion + +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "tracking_1": ("TRACKING", {"forceInput": True}), + "tracking_2": ("TRACKING", {"forceInput": True}), + }, + "optional": { + "prompt_1": ("STRING", {"default": "", "forceInput": True}), + "prompt_2": ("STRING", {"default": "", "forceInput": True}), + } + } + + def append(self, tracking_1, tracking_2, prompt_1="", prompt_2=""): + tracking_copy = tracking_1.copy() + # Check for existing class names and class IDs, and raise an error if they exist + for class_name, class_data in tracking_2.items(): + if class_name not in tracking_copy: + tracking_copy[class_name] = class_data + else: + # If the class name exists, merge the class data from tracking_2 into tracking_copy + # This will add new class IDs under the same class name without raising an error + tracking_copy[class_name].update(class_data) + prompt_string = prompt_1 + "," + prompt_2 + return (tracking_copy, prompt_string) + +class InterpolateCoords: + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("coordinates",) + FUNCTION = "interpolate" + 
CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +Interpolates coordinates based on a curve. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "coordinates": ("STRING", {"forceInput": True}), + "interpolation_curve": ("FLOAT", {"forceInput": True}), + + }, + } + + def interpolate(self, coordinates, interpolation_curve): + # Parse the JSON string to get the list of coordinates + coordinates = json.loads(coordinates.replace("'", '"')) + + # Convert the list of dictionaries to a list of (x, y) tuples for easier processing + coordinates = [(coord['x'], coord['y']) for coord in coordinates] + + # Calculate the total length of the original path + path_length = sum(np.linalg.norm(np.array(coordinates[i]) - np.array(coordinates[i-1])) + for i in range(1, len(coordinates))) + + # Initialize variables for interpolation + interpolated_coords = [] + current_length = 0 + current_index = 0 + + # Iterate over the normalized curve + for normalized_length in interpolation_curve: + target_length = normalized_length * path_length # Convert to the original scale + while current_index < len(coordinates) - 1: + segment_start, segment_end = np.array(coordinates[current_index]), np.array(coordinates[current_index + 1]) + segment_length = np.linalg.norm(segment_end - segment_start) + if current_length + segment_length >= target_length: + break + current_length += segment_length + current_index += 1 + + # Interpolate between the last two points + if current_index < len(coordinates) - 1: + p1, p2 = np.array(coordinates[current_index]), np.array(coordinates[current_index + 1]) + segment_length = np.linalg.norm(p2 - p1) + if segment_length > 0: + t = (target_length - current_length) / segment_length + interpolated_point = p1 + t * (p2 - p1) + interpolated_coords.append(interpolated_point.tolist()) + else: + interpolated_coords.append(p1.tolist()) + else: + # If the target_length is at or beyond the end of the path, add the last coordinate + interpolated_coords.append(coordinates[-1]) + + # Convert back to string format if necessary + interpolated_coords_str = "[" + ", ".join([f"{{'x': {round(coord[0])}, 'y': {round(coord[1])}}}" for coord in interpolated_coords]) + "]" + print(interpolated_coords_str) + + return (interpolated_coords_str,) + +class DrawInstanceDiffusionTracking: + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image", ) + FUNCTION = "draw" + CATEGORY = "KJNodes/InstanceDiffusion" + DESCRIPTION = """ +Draws the tracking data from +CreateInstanceDiffusionTracking -node. 
+ +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE", ), + "tracking": ("TRACKING", {"forceInput": True}), + "box_line_width": ("INT", {"default": 2, "min": 1, "max": 10, "step": 1}), + "draw_text": ("BOOLEAN", {"default": True}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "font_size": ("INT", {"default": 20}), + }, + } + + def draw(self, image, tracking, box_line_width, draw_text, font, font_size): + import matplotlib.cm as cm + + modified_images = [] + + colormap = cm.get_cmap('rainbow', len(tracking)) + if draw_text: + font_path = folder_paths.get_full_path("kjnodes_fonts", font) + font = ImageFont.truetype(font_path, font_size) + + # Iterate over each image in the batch + for i in range(image.shape[0]): + # Extract the current image and convert it to a PIL image + current_image = image[i, :, :, :].permute(2, 0, 1) + pil_image = transforms.ToPILImage()(current_image) + + draw = ImageDraw.Draw(pil_image) + + # Iterate over the bounding boxes for the current image + for j, (class_name, class_data) in enumerate(tracking.items()): + for class_id, bbox_list in class_data.items(): + # Check if the current index is within the bounds of the bbox_list + if i < len(bbox_list): + bbox = bbox_list[i] + # Ensure bbox is a list or tuple before unpacking + if isinstance(bbox, (list, tuple)): + x1, y1, x2, y2, _, _ = bbox + # Convert coordinates to integers + x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) + # Generate a color from the rainbow colormap + color = tuple(int(255 * x) for x in colormap(j / len(tracking)))[:3] + # Draw the bounding box on the image with the generated color + draw.rectangle([x1, y1, x2, y2], outline=color, width=box_line_width) + if draw_text: + # Draw the class name and ID as text above the box with the generated color + text = f"{class_id}.{class_name}" + # Calculate the width and height of the text + _, _, text_width, text_height = draw.textbbox((0, 0), text=text, font=font) + # Position the text above the top-left corner of the box + text_position = (x1, y1 - text_height) + draw.text(text_position, text, fill=color, font=font) + else: + print(f"Unexpected data type for bbox: {type(bbox)}") + + # Convert the drawn image back to a torch tensor and adjust back to (H, W, C) + modified_image_tensor = transforms.ToTensor()(pil_image).permute(1, 2, 0) + modified_images.append(modified_image_tensor) + + # Stack the modified images back into a batch + image_tensor_batch = torch.stack(modified_images).cpu().float() + + return image_tensor_batch, + +class PointsEditor: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "points_store": ("STRING", {"multiline": False}), + "coordinates": ("STRING", {"multiline": False}), + "neg_coordinates": ("STRING", {"multiline": False}), + "bbox_store": ("STRING", {"multiline": False}), + "bboxes": ("STRING", {"multiline": False}), + "bbox_format": ( + [ + 'xyxy', + 'xywh', + ], + ), + "width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "normalize": ("BOOLEAN", {"default": False}), + }, + "optional": { + "bg_image": ("IMAGE", ), + }, + } + + RETURN_TYPES = ("STRING", "STRING", "BBOX", "MASK", "IMAGE") + RETURN_NAMES = ("positive_coords", "negative_coords", "bbox", "bbox_mask", "cropped_image") + FUNCTION = "pointdata" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +# WORK IN PROGRESS +Do not count on this as part of your workflow yet, +probably contains lots of bugs 
and stability is not +guaranteed!! + +## Graphical editor to create coordinates + +**Shift + click** to add a positive (green) point. +**Shift + right click** to add a negative (red) point. +**Ctrl + click** to draw a box. +**Right click on a point** to delete it. +Note that you can't delete from start/end of the points array. + +To add an image select the node and copy/paste or drag in the image. +Or from the bg_image input on queue (first frame of the batch). + +**THE IMAGE IS SAVED TO THE NODE AND WORKFLOW METADATA** +you can clear the image from the context menu by right clicking on the canvas + +""" + + def pointdata(self, points_store, bbox_store, width, height, coordinates, neg_coordinates, normalize, bboxes, bbox_format="xyxy", bg_image=None): + coordinates = json.loads(coordinates) + pos_coordinates = [] + for coord in coordinates: + coord['x'] = int(round(coord['x'])) + coord['y'] = int(round(coord['y'])) + if normalize: + norm_x = coord['x'] / width + norm_y = coord['y'] / height + pos_coordinates.append({'x': norm_x, 'y': norm_y}) + else: + pos_coordinates.append({'x': coord['x'], 'y': coord['y']}) + + if neg_coordinates: + coordinates = json.loads(neg_coordinates) + neg_coordinates = [] + for coord in coordinates: + coord['x'] = int(round(coord['x'])) + coord['y'] = int(round(coord['y'])) + if normalize: + norm_x = coord['x'] / width + norm_y = coord['y'] / height + neg_coordinates.append({'x': norm_x, 'y': norm_y}) + else: + neg_coordinates.append({'x': coord['x'], 'y': coord['y']}) + + # Create a blank mask + mask = np.zeros((height, width), dtype=np.uint8) + bboxes = json.loads(bboxes) + print(bboxes) + valid_bboxes = [] + for bbox in bboxes: + if (bbox.get("startX") is None or + bbox.get("startY") is None or + bbox.get("endX") is None or + bbox.get("endY") is None): + continue # Skip this bounding box if any value is None + else: + # Ensure that endX and endY are greater than startX and startY + x_min = min(int(bbox["startX"]), int(bbox["endX"])) + y_min = min(int(bbox["startY"]), int(bbox["endY"])) + x_max = max(int(bbox["startX"]), int(bbox["endX"])) + y_max = max(int(bbox["startY"]), int(bbox["endY"])) + + valid_bboxes.append((x_min, y_min, x_max, y_max)) + + bboxes_xyxy = [] + for bbox in valid_bboxes: + x_min, y_min, x_max, y_max = bbox + bboxes_xyxy.append((x_min, y_min, x_max, y_max)) + mask[y_min:y_max, x_min:x_max] = 1 # Fill the bounding box area with 1s + + if bbox_format == "xywh": + bboxes_xywh = [] + for bbox in valid_bboxes: + x_min, y_min, x_max, y_max = bbox + width = x_max - x_min + height = y_max - y_min + bboxes_xywh.append((x_min, y_min, width, height)) + bboxes = bboxes_xywh + else: + bboxes = bboxes_xyxy + + mask_tensor = torch.from_numpy(mask) + mask_tensor = mask_tensor.unsqueeze(0).float().cpu() + + if bg_image is not None and len(valid_bboxes) > 0: + x_min, y_min, x_max, y_max = bboxes[0] + cropped_image = bg_image[:, y_min:y_max, x_min:x_max, :] + + elif bg_image is not None: + cropped_image = bg_image + + if bg_image is None: + return (json.dumps(pos_coordinates), json.dumps(neg_coordinates), bboxes, mask_tensor) + else: + transform = transforms.ToPILImage() + image = transform(bg_image[0].permute(2, 0, 1)) + buffered = io.BytesIO() + image.save(buffered, format="JPEG", quality=75) + + # Step 3: Encode the image bytes to a Base64 string + img_bytes = buffered.getvalue() + img_base64 = base64.b64encode(img_bytes).decode('utf-8') + + return { + "ui": {"bg_image": [img_base64]}, + "result": (json.dumps(pos_coordinates), 
json.dumps(neg_coordinates), bboxes, mask_tensor, cropped_image) + } + +class CutAndDragOnPath: + RETURN_TYPES = ("IMAGE", "MASK",) + RETURN_NAMES = ("image","mask", ) + FUNCTION = "cutanddrag" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Cuts the masked area from the image, and drags it along the path. If inpaint is enabled, and no bg_image is provided, the cut area is filled using cv2 TELEA algorithm. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "coordinates": ("STRING", {"forceInput": True}), + "mask": ("MASK",), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "inpaint": ("BOOLEAN", {"default": True}), + }, + "optional": { + "bg_image": ("IMAGE",), + } + } + + def cutanddrag(self, image, coordinates, mask, frame_width, frame_height, inpaint, bg_image=None): + # Parse coordinates + if len(coordinates) < 10: + coords_list = [] + for coords in coordinates: + coords = json.loads(coords.replace("'", '"')) + coords_list.append(coords) + else: + coords = json.loads(coordinates.replace("'", '"')) + coords_list = [coords] + + batch_size = len(coords_list[0]) + images_list = [] + masks_list = [] + + # Convert input image and mask to PIL + input_image = tensor2pil(image)[0] + input_mask = tensor2pil(mask)[0] + + # Find masked region bounds + mask_array = np.array(input_mask) + y_indices, x_indices = np.where(mask_array > 0) + if len(x_indices) == 0 or len(y_indices) == 0: + return (image, mask) + + x_min, x_max = x_indices.min(), x_indices.max() + y_min, y_max = y_indices.min(), y_indices.max() + + # Cut out the masked region + cut_width = x_max - x_min + cut_height = y_max - y_min + cut_image = input_image.crop((x_min, y_min, x_max, y_max)) + cut_mask = input_mask.crop((x_min, y_min, x_max, y_max)) + + # Create inpainted background + if bg_image is None: + background = input_image.copy() + # Inpaint the cut area + if inpaint: + import cv2 + border = 5 # Create small border around cut area for better inpainting + fill_mask = Image.new("L", background.size, 0) + draw = ImageDraw.Draw(fill_mask) + draw.rectangle([x_min-border, y_min-border, x_max+border, y_max+border], fill=255) + background = cv2.inpaint( + np.array(background), + np.array(fill_mask), + inpaintRadius=3, + flags=cv2.INPAINT_TELEA + ) + background = Image.fromarray(background) + else: + background = tensor2pil(bg_image)[0] + + # Create batch of images with cut region at different positions + for i in range(batch_size): + # Create new image + new_image = background.copy() + new_mask = Image.new("L", (frame_width, frame_height), 0) + + # Get target position from coordinates + for coords in coords_list: + target_x = int(coords[i]['x'] - cut_width/2) + target_y = int(coords[i]['y'] - cut_height/2) + + # Paste cut region at new position + new_image.paste(cut_image, (target_x, target_y), cut_mask) + new_mask.paste(cut_mask, (target_x, target_y)) + + # Convert to tensor and append + image_tensor = pil2tensor(new_image) + mask_tensor = pil2tensor(new_mask) + + images_list.append(image_tensor) + masks_list.append(mask_tensor) + + # Stack tensors into batches + out_images = torch.cat(images_list, dim=0).cpu().float() + out_masks = torch.cat(masks_list, dim=0) + + return (out_images, out_masks) \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/nodes/image_nodes.py b/custom_nodes/ComfyUI-KJNodes-main/nodes/image_nodes.py new file mode 100644 index 
0000000000000000000000000000000000000000..74570e1ad071815a3d6cf4a9878e752d7a4196fe --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/nodes/image_nodes.py @@ -0,0 +1,3157 @@ +import numpy as np +import time +import torch +import torch.nn.functional as F +import torchvision.transforms as T +import io +import base64 +import random +import math +import os +import re +import json +from PIL.PngImagePlugin import PngInfo +try: + import cv2 +except: + print("OpenCV not installed") + pass +from PIL import ImageGrab, ImageDraw, ImageFont, Image, ImageSequence, ImageOps + +from nodes import MAX_RESOLUTION, SaveImage +from comfy_extras.nodes_mask import ImageCompositeMasked +from comfy.cli_args import args +from comfy.utils import ProgressBar, common_upscale +import folder_paths +import model_management + +script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +class ImagePass: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + }, + "optional": { + "image": ("IMAGE",), + }, + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "passthrough" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Passes the image through without modifying it. +""" + + def passthrough(self, image=None): + return image, + +class ColorMatch: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image_ref": ("IMAGE",), + "image_target": ("IMAGE",), + "method": ( + [ + 'mkl', + 'hm', + 'reinhard', + 'mvgd', + 'hm-mvgd-hm', + 'hm-mkl-hm', + ], { + "default": 'mkl' + }), + }, + "optional": { + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + } + } + + CATEGORY = "KJNodes/image" + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "colormatch" + DESCRIPTION = """ +color-matcher enables color transfer across images which comes in handy for automatic +color-grading of photographs, paintings and film sequences as well as light-field +and stopmotion corrections. + +The methods behind the mappings are based on the approach from Reinhard et al., +the Monge-Kantorovich Linearization (MKL) as proposed by Pitie et al. and our analytical solution +to a Multi-Variate Gaussian Distribution (MVGD) transfer in conjunction with classical histogram +matching. As shown below our HM-MVGD-HM compound outperforms existing methods. +https://github.com/hahnec/color-matcher/ + +""" + + def colormatch(self, image_ref, image_target, method, strength=1.0): + try: + from color_matcher import ColorMatcher + except: + raise Exception("Can't import color-matcher, did you install requirements.txt? 
Manual install: pip install color-matcher") + cm = ColorMatcher() + image_ref = image_ref.cpu() + image_target = image_target.cpu() + batch_size = image_target.size(0) + out = [] + images_target = image_target.squeeze() + images_ref = image_ref.squeeze() + + image_ref_np = images_ref.numpy() + images_target_np = images_target.numpy() + + if image_ref.size(0) > 1 and image_ref.size(0) != batch_size: + raise ValueError("ColorMatch: Use either single reference image or a matching batch of reference images.") + + for i in range(batch_size): + image_target_np = images_target_np if batch_size == 1 else images_target[i].numpy() + image_ref_np_i = image_ref_np if image_ref.size(0) == 1 else images_ref[i].numpy() + try: + image_result = cm.transfer(src=image_target_np, ref=image_ref_np_i, method=method) + except BaseException as e: + print(f"Error occurred during transfer: {e}") + break + # Apply the strength multiplier + image_result = image_target_np + strength * (image_result - image_target_np) + out.append(torch.from_numpy(image_result)) + + out = torch.stack(out, dim=0).to(torch.float32) + out.clamp_(0, 1) + return (out,) + +class SaveImageWithAlpha: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + self.prefix_append = "" + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"images": ("IMAGE", ), + "mask": ("MASK", ), + "filename_prefix": ("STRING", {"default": "ComfyUI"})}, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + RETURN_TYPES = () + FUNCTION = "save_images_alpha" + OUTPUT_NODE = True + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Saves an image and mask as .PNG with the mask as the alpha channel. +""" + + def save_images_alpha(self, images, mask, filename_prefix="ComfyUI_image_with_alpha", prompt=None, extra_pnginfo=None): + from PIL.PngImagePlugin import PngInfo + filename_prefix += self.prefix_append + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) + results = list() + if mask.dtype == torch.float16: + mask = mask.to(torch.float32) + def file_counter(): + max_counter = 0 + # Loop through the existing files + for existing_file in os.listdir(full_output_folder): + # Check if the file matches the expected format + match = re.fullmatch(fr"{filename}_(\d+)_?\.[a-zA-Z0-9]+", existing_file) + if match: + # Extract the numeric portion of the filename + file_counter = int(match.group(1)) + # Update the maximum counter value if necessary + if file_counter > max_counter: + max_counter = file_counter + return max_counter + + for image, alpha in zip(images, mask): + i = 255. * image.cpu().numpy() + a = 255. 
* alpha.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + + # Resize the mask to match the image size + a_resized = Image.fromarray(a).resize(img.size, Image.LANCZOS) + a_resized = np.clip(a_resized, 0, 255).astype(np.uint8) + img.putalpha(Image.fromarray(a_resized, mode='L')) + metadata = None + if not args.disable_metadata: + metadata = PngInfo() + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + + # Increment the counter by 1 to get the next available value + counter = file_counter() + 1 + file = f"{filename}_{counter:05}.png" + img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + + return { "ui": { "images": results } } + +class ImageConcanate: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "direction": ( + [ 'right', + 'down', + 'left', + 'up', + ], + { + "default": 'right' + }), + "match_image_size": ("BOOLEAN", {"default": True}), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "concatenate" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Concatenates the image2 to image1 in the specified direction. +""" + + def concatenate(self, image1, image2, direction, match_image_size, first_image_shape=None): + # Check if the batch sizes are different + batch_size1 = image1.shape[0] + batch_size2 = image2.shape[0] + + if batch_size1 != batch_size2: + # Calculate the number of repetitions needed + max_batch_size = max(batch_size1, batch_size2) + repeats1 = max_batch_size - batch_size1 + repeats2 = max_batch_size - batch_size2 + + # Repeat the last image to match the largest batch size + if repeats1 > 0: + last_image1 = image1[-1].unsqueeze(0).repeat(repeats1, 1, 1, 1) + image1 = torch.cat([image1.clone(), last_image1], dim=0) + if repeats2 > 0: + last_image2 = image2[-1].unsqueeze(0).repeat(repeats2, 1, 1, 1) + image2 = torch.cat([image2.clone(), last_image2], dim=0) + + if match_image_size: + # Use first_image_shape if provided; otherwise, default to image1's shape + target_shape = first_image_shape if first_image_shape is not None else image1.shape + + original_height = image2.shape[1] + original_width = image2.shape[2] + original_aspect_ratio = original_width / original_height + + if direction in ['left', 'right']: + # Match the height and adjust the width to preserve aspect ratio + target_height = target_shape[1] # B, H, W, C format + target_width = int(target_height * original_aspect_ratio) + elif direction in ['up', 'down']: + # Match the width and adjust the height to preserve aspect ratio + target_width = target_shape[2] # B, H, W, C format + target_height = int(target_width / original_aspect_ratio) + + # Adjust image2 to the expected format for common_upscale + image2_for_upscale = image2.movedim(-1, 1) # Move C to the second position (B, C, H, W) + + # Resize image2 to match the target size while preserving aspect ratio + image2_resized = common_upscale(image2_for_upscale, target_width, target_height, "lanczos", "disabled") + + # Adjust image2 back to the original format (B, H, W, C) after resizing + image2_resized = image2_resized.movedim(1, -1) + else: + image2_resized = image2 + + # Ensure both images have the same number of channels + channels_image1 = image1.shape[-1] + channels_image2 = image2_resized.shape[-1] + + if 
channels_image1 != channels_image2: + if channels_image1 < channels_image2: + # Add alpha channel to image1 if image2 has it + alpha_channel = torch.ones((*image1.shape[:-1], channels_image2 - channels_image1), device=image1.device) + image1 = torch.cat((image1, alpha_channel), dim=-1) + else: + # Add alpha channel to image2 if image1 has it + alpha_channel = torch.ones((*image2_resized.shape[:-1], channels_image1 - channels_image2), device=image2_resized.device) + image2_resized = torch.cat((image2_resized, alpha_channel), dim=-1) + + + # Concatenate based on the specified direction + if direction == 'right': + concatenated_image = torch.cat((image1, image2_resized), dim=2) # Concatenate along width + elif direction == 'down': + concatenated_image = torch.cat((image1, image2_resized), dim=1) # Concatenate along height + elif direction == 'left': + concatenated_image = torch.cat((image2_resized, image1), dim=2) # Concatenate along width + elif direction == 'up': + concatenated_image = torch.cat((image2_resized, image1), dim=1) # Concatenate along height + return concatenated_image, + +import torch # Make sure you have PyTorch installed + +class ImageConcatFromBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE",), + "num_columns": ("INT", {"default": 3, "min": 1, "max": 255, "step": 1}), + "match_image_size": ("BOOLEAN", {"default": False}), + "max_resolution": ("INT", {"default": 4096}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "concat" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ + Concatenates images from a batch into a grid with a specified number of columns. + """ + + def concat(self, images, num_columns, match_image_size, max_resolution): + # Assuming images is a batch of images (B, H, W, C) + batch_size, height, width, channels = images.shape + num_rows = (batch_size + num_columns - 1) // num_columns # Calculate number of rows + + print(f"Initial dimensions: batch_size={batch_size}, height={height}, width={width}, channels={channels}") + print(f"num_rows={num_rows}, num_columns={num_columns}") + + if match_image_size: + target_shape = images[0].shape + + resized_images = [] + for image in images: + original_height = image.shape[0] + original_width = image.shape[1] + original_aspect_ratio = original_width / original_height + + if original_aspect_ratio > 1: + target_height = target_shape[0] + target_width = int(target_height * original_aspect_ratio) + else: + target_width = target_shape[1] + target_height = int(target_width / original_aspect_ratio) + + print(f"Resizing image from ({original_height}, {original_width}) to ({target_height}, {target_width})") + + # Resize the image to match the target size while preserving aspect ratio + resized_image = common_upscale(image.movedim(-1, 0), target_width, target_height, "lanczos", "disabled") + resized_image = resized_image.movedim(0, -1) # Move channels back to the last dimension + resized_images.append(resized_image) + + # Convert the list of resized images back to a tensor + images = torch.stack(resized_images) + + height, width = target_shape[:2] # Update height and width + + # Initialize an empty grid + grid_height = num_rows * height + grid_width = num_columns * width + + print(f"Grid dimensions before scaling: grid_height={grid_height}, grid_width={grid_width}") + + # Original scale factor calculation remains unchanged + scale_factor = min(max_resolution / grid_height, max_resolution / grid_width, 1.0) + + # Apply scale factor to height and width + scaled_height = height * 
scale_factor + scaled_width = width * scale_factor + + # Round scaled dimensions to the nearest number divisible by 8 + height = max(1, int(round(scaled_height / 8) * 8)) + width = max(1, int(round(scaled_width / 8) * 8)) + + if abs(scaled_height - height) > 4: + height = max(1, int(round((scaled_height + 4) / 8) * 8)) + if abs(scaled_width - width) > 4: + width = max(1, int(round((scaled_width + 4) / 8) * 8)) + + # Recalculate grid dimensions with adjusted height and width + grid_height = num_rows * height + grid_width = num_columns * width + print(f"Grid dimensions after scaling: grid_height={grid_height}, grid_width={grid_width}") + print(f"Final image dimensions: height={height}, width={width}") + + grid = torch.zeros((grid_height, grid_width, channels), dtype=images.dtype) + + for idx, image in enumerate(images): + resized_image = torch.nn.functional.interpolate(image.unsqueeze(0).permute(0, 3, 1, 2), size=(height, width), mode="bilinear").squeeze().permute(1, 2, 0) + row = idx // num_columns + col = idx % num_columns + grid[row*height:(row+1)*height, col*width:(col+1)*width, :] = resized_image + + return grid.unsqueeze(0), + + +class ImageGridComposite2x2: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "image3": ("IMAGE",), + "image4": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "compositegrid" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Concatenates the 4 input images into a 2x2 grid. +""" + + def compositegrid(self, image1, image2, image3, image4): + top_row = torch.cat((image1, image2), dim=2) + bottom_row = torch.cat((image3, image4), dim=2) + grid = torch.cat((top_row, bottom_row), dim=1) + return (grid,) + +class ImageGridComposite3x3: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "image3": ("IMAGE",), + "image4": ("IMAGE",), + "image5": ("IMAGE",), + "image6": ("IMAGE",), + "image7": ("IMAGE",), + "image8": ("IMAGE",), + "image9": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "compositegrid" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Concatenates the 9 input images into a 3x3 grid. 
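+
+The grid is plain tensor concatenation on B,H,W,C images (sketch with
+dummy, equally sized inputs, which the node assumes):
+
+    import torch
+    imgs = [torch.rand(1, 64, 64, 3) for _ in range(9)]
+    rows = [torch.cat(imgs[i:i + 3], dim=2) for i in (0, 3, 6)]  # along width
+    grid = torch.cat(rows, dim=1)                                # rows along height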
+""" + + def compositegrid(self, image1, image2, image3, image4, image5, image6, image7, image8, image9): + top_row = torch.cat((image1, image2, image3), dim=2) + mid_row = torch.cat((image4, image5, image6), dim=2) + bottom_row = torch.cat((image7, image8, image9), dim=2) + grid = torch.cat((top_row, mid_row, bottom_row), dim=1) + return (grid,) + +class ImageBatchTestPattern: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "batch_size": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), + "start_from": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "text_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "text_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "font_size": ("INT", {"default": 255,"min": 8, "max": 4096, "step": 1}), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "generatetestpattern" + CATEGORY = "KJNodes/text" + + def generatetestpattern(self, batch_size, font, font_size, start_from, width, height, text_x, text_y): + out = [] + # Generate the sequential numbers for each image + numbers = np.arange(start_from, start_from + batch_size) + font_path = folder_paths.get_full_path("kjnodes_fonts", font) + + for number in numbers: + # Create a black image with the number as a random color text + image = Image.new("RGB", (width, height), color='black') + draw = ImageDraw.Draw(image) + + # Generate a random color for the text + font_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) + + font = ImageFont.truetype(font_path, font_size) + + # Get the size of the text and position it in the center + text = str(number) + + try: + draw.text((text_x, text_y), text, font=font, fill=font_color, features=['-liga']) + except: + draw.text((text_x, text_y), text, font=font, fill=font_color,) + + # Convert the image to a numpy array and normalize the pixel values + image_np = np.array(image).astype(np.float32) / 255.0 + image_tensor = torch.from_numpy(image_np).unsqueeze(0) + out.append(image_tensor) + out_tensor = torch.cat(out, dim=0) + + return (out_tensor,) + +class ImageGrabPIL: + + @classmethod + def IS_CHANGED(cls): + + return + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "screencap" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Captures an area specified by screen coordinates. +Can be used for realtime diffusion with autoqueue. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "width": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), + "num_frames": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), + "delay": ("FLOAT", {"default": 0.1,"min": 0.0, "max": 10.0, "step": 0.01}), + }, + } + + def screencap(self, x, y, width, height, num_frames, delay): + start_time = time.time() + captures = [] + bbox = (x, y, x + width, y + height) + + for _ in range(num_frames): + # Capture screen + screen_capture = ImageGrab.grab(bbox=bbox) + screen_capture_torch = torch.from_numpy(np.array(screen_capture, dtype=np.float32) / 255.0).unsqueeze(0) + captures.append(screen_capture_torch) + + # Wait for a short delay if more than one frame is to be captured + if num_frames > 1: + time.sleep(delay) + + elapsed_time = time.time() - start_time + print(f"screengrab took {elapsed_time} seconds.") + + return (torch.cat(captures, dim=0),) + +class Screencap_mss: + + @classmethod + def IS_CHANGED(s, **kwargs): + return float("NaN") + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "screencap" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Captures an area specified by screen coordinates. +Can be used for realtime diffusion with autoqueue. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "x": ("INT", {"default": 0,"min": 0, "max": 10000, "step": 1}), + "y": ("INT", {"default": 0,"min": 0, "max": 10000, "step": 1}), + "width": ("INT", {"default": 512,"min": 0, "max": 10000, "step": 1}), + "height": ("INT", {"default": 512,"min": 0, "max": 10000, "step": 1}), + "num_frames": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), + "delay": ("FLOAT", {"default": 0.1,"min": 0.0, "max": 10.0, "step": 0.01}), + }, + } + + def screencap(self, x, y, width, height, num_frames, delay): + from mss import mss + captures = [] + with mss() as sct: + bbox = {'top': y, 'left': x, 'width': width, 'height': height} + + for _ in range(num_frames): + sct_img = sct.grab(bbox) + img_np = np.array(sct_img) + img_torch = torch.from_numpy(img_np[..., [2, 1, 0]]).float() / 255.0 + captures.append(img_torch) + + if num_frames > 1: + time.sleep(delay) + + return (torch.stack(captures, 0),) + +class WebcamCaptureCV2: + + @classmethod + def IS_CHANGED(cls): + return + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "capture" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +Captures a frame from a webcam using CV2. +Can be used for realtime diffusion with autoqueue. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "width": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), + "cam_index": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "release": ("BOOLEAN", {"default": False}), + }, + } + + def capture(self, x, y, cam_index, width, height, release): + # Check if the camera index has changed or the capture object doesn't exist + if not hasattr(self, "cap") or self.cap is None or self.current_cam_index != cam_index: + if hasattr(self, "cap") and self.cap is not None: + self.cap.release() + self.current_cam_index = cam_index + self.cap = cv2.VideoCapture(cam_index) + try: + self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width) + self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height) + except: + pass + if not self.cap.isOpened(): + raise Exception("Could not open webcam") + + ret, frame = self.cap.read() + if not ret: + raise Exception("Failed to capture image from webcam") + + # Crop the frame to the specified bbox + frame = frame[y:y+height, x:x+width] + img_torch = torch.from_numpy(frame[..., [2, 1, 0]]).float() / 255.0 + + if release: + self.cap.release() + self.cap = None + + return (img_torch.unsqueeze(0),) + +class AddLabel: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image":("IMAGE",), + "text_x": ("INT", {"default": 10, "min": 0, "max": 4096, "step": 1}), + "text_y": ("INT", {"default": 2, "min": 0, "max": 4096, "step": 1}), + "height": ("INT", {"default": 48, "min": -1, "max": 4096, "step": 1}), + "font_size": ("INT", {"default": 32, "min": 0, "max": 4096, "step": 1}), + "font_color": ("STRING", {"default": "white"}), + "label_color": ("STRING", {"default": "black"}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "text": ("STRING", {"default": "Text"}), + "direction": ( + [ 'up', + 'down', + 'left', + 'right', + 'overlay' + ], + { + "default": 'up' + }), + }, + "optional":{ + "caption": ("STRING", {"default": "", "forceInput": True}), + } + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "addlabel" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Creates a new with the given text, and concatenates it to +either above or below the input image. +Note that this changes the input image's height! 
+Fonts are loaded from this folder: +ComfyUI/custom_nodes/ComfyUI-KJNodes/fonts +""" + + def addlabel(self, image, text_x, text_y, text, height, font_size, font_color, label_color, font, direction, caption=""): + batch_size = image.shape[0] + width = image.shape[2] + + font_path = os.path.join(script_directory, "fonts", "TTNorms-Black.otf") if font == "TTNorms-Black.otf" else folder_paths.get_full_path("kjnodes_fonts", font) + + def process_image(input_image, caption_text): + font = ImageFont.truetype(font_path, font_size) + words = caption_text.split() + lines = [] + current_line = [] + current_line_width = 0 + + for word in words: + word_width = font.getbbox(word)[2] + if current_line_width + word_width <= width - 2 * text_x: + current_line.append(word) + current_line_width += word_width + font.getbbox(" ")[2] # Add space width + else: + lines.append(" ".join(current_line)) + current_line = [word] + current_line_width = word_width + + if current_line: + lines.append(" ".join(current_line)) + + if direction == 'overlay': + pil_image = Image.fromarray((input_image.cpu().numpy() * 255).astype(np.uint8)) + else: + if height == -1: + # Adjust the image height automatically + margin = 8 + required_height = (text_y + len(lines) * font_size) + margin # Calculate required height + pil_image = Image.new("RGB", (width, required_height), label_color) + else: + # Initialize with a minimal height + label_image = Image.new("RGB", (width, height), label_color) + pil_image = label_image + + draw = ImageDraw.Draw(pil_image) + + + y_offset = text_y + for line in lines: + try: + draw.text((text_x, y_offset), line, font=font, fill=font_color, features=['-liga']) + except: + draw.text((text_x, y_offset), line, font=font, fill=font_color) + y_offset += font_size + + processed_image = torch.from_numpy(np.array(pil_image).astype(np.float32) / 255.0).unsqueeze(0) + return processed_image + + if caption == "": + processed_images = [process_image(img, text) for img in image] + else: + assert len(caption) == batch_size, f"Number of captions {(len(caption))} does not match number of images" + processed_images = [process_image(img, cap) for img, cap in zip(image, caption)] + processed_batch = torch.cat(processed_images, dim=0) + + # Combine images based on direction + if direction == 'down': + combined_images = torch.cat((image, processed_batch), dim=1) + elif direction == 'up': + combined_images = torch.cat((processed_batch, image), dim=1) + elif direction == 'left': + processed_batch = torch.rot90(processed_batch, 3, (2, 3)).permute(0, 3, 1, 2) + combined_images = torch.cat((processed_batch, image), dim=2) + elif direction == 'right': + processed_batch = torch.rot90(processed_batch, 3, (2, 3)).permute(0, 3, 1, 2) + combined_images = torch.cat((image, processed_batch), dim=2) + else: + combined_images = processed_batch + + return (combined_images,) + +class GetImageSizeAndCount: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE","INT", "INT", "INT",) + RETURN_NAMES = ("image", "width", "height", "count",) + FUNCTION = "getsize" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Returns width, height and batch size of the image, +and passes it through unchanged. 
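+
+For a B,H,W,C ComfyUI image tensor the values are read straight off
+the shape (sketch with a dummy batch):
+
+    import torch
+    image = torch.rand(4, 512, 768, 3)   # B, H, W, C
+    count, height, width = image.shape[0], image.shape[1], image.shape[2]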
+ +""" + + def getsize(self, image): + width = image.shape[2] + height = image.shape[1] + count = image.shape[0] + return {"ui": { + "text": [f"{count}x{width}x{height}"]}, + "result": (image, width, height, count) + } + +class ImageBatchRepeatInterleaving: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "repeat" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Repeats each image in a batch by the specified number of times. +Example batch of 5 images: 0, 1 ,2, 3, 4 +with repeats 2 becomes batch of 10 images: 0, 0, 1, 1, 2, 2, 3, 3, 4, 4 +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "repeats": ("INT", {"default": 1, "min": 1, "max": 4096}), + }, + } + + def repeat(self, images, repeats): + + repeated_images = torch.repeat_interleave(images, repeats=repeats, dim=0) + return (repeated_images, ) + +class ImageUpscaleWithModelBatched: + @classmethod + def INPUT_TYPES(s): + return {"required": { "upscale_model": ("UPSCALE_MODEL",), + "images": ("IMAGE",), + "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}), + }} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "upscale" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Same as ComfyUI native model upscaling node, +but allows setting sub-batches for reduced VRAM usage. +""" + def upscale(self, upscale_model, images, per_batch): + + device = model_management.get_torch_device() + upscale_model.to(device) + in_img = images.movedim(-1,-3) + + steps = in_img.shape[0] + pbar = ProgressBar(steps) + t = [] + + for start_idx in range(0, in_img.shape[0], per_batch): + sub_images = upscale_model(in_img[start_idx:start_idx+per_batch].to(device)) + t.append(sub_images.cpu()) + # Calculate the number of images processed in this batch + batch_count = sub_images.shape[0] + # Update the progress bar by the number of images processed in this batch + pbar.update(batch_count) + upscale_model.cpu() + + t = torch.cat(t, dim=0).permute(0, 2, 3, 1).cpu() + + return (t,) + +class ImageNormalize_Neg1_To_1: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE",), + + }} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "normalize" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Normalize the images to be in the range [-1, 1] +""" + + def normalize(self,images): + images = images * 2.0 - 1.0 + return (images,) + +class RemapImageRange: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + "min": ("FLOAT", {"default": 0.0,"min": -10.0, "max": 1.0, "step": 0.01}), + "max": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 10.0, "step": 0.01}), + "clamp": ("BOOLEAN", {"default": True}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "remap" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Remaps the image values to the specified range. +""" + + def remap(self, image, min, max, clamp): + if image.dtype == torch.float16: + image = image.to(torch.float32) + image = min + image * (max - min) + if clamp: + image = torch.clamp(image, min=0.0, max=1.0) + return (image, ) + +class SplitImageChannels: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + }, + } + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK") + RETURN_NAMES = ("red", "green", "blue", "mask") + FUNCTION = "split" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Splits image channels into images where the selected channel +is repeated for all channels, and the alpha as a mask. 
+""" + + def split(self, image): + red = image[:, :, :, 0:1] # Red channel + green = image[:, :, :, 1:2] # Green channel + blue = image[:, :, :, 2:3] # Blue channel + alpha = image[:, :, :, 3:4] # Alpha channel + alpha = alpha.squeeze(-1) + + # Repeat the selected channel for all channels + red = torch.cat([red, red, red], dim=3) + green = torch.cat([green, green, green], dim=3) + blue = torch.cat([blue, blue, blue], dim=3) + return (red, green, blue, alpha) + +class MergeImageChannels: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "red": ("IMAGE",), + "green": ("IMAGE",), + "blue": ("IMAGE",), + + }, + "optional": { + "alpha": ("MASK", {"default": None}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "merge" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Merges channel data into an image. +""" + + def merge(self, red, green, blue, alpha=None): + image = torch.stack([ + red[..., 0, None], # Red channel + green[..., 1, None], # Green channel + blue[..., 2, None] # Blue channel + ], dim=-1) + image = image.squeeze(-2) + if alpha is not None: + image = torch.cat([image, alpha.unsqueeze(-1)], dim=-1) + return (image,) + +class ImagePadForOutpaintMasked: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "feathering": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + }, + "optional": { + "mask": ("MASK",), + } + } + + RETURN_TYPES = ("IMAGE", "MASK") + FUNCTION = "expand_image" + + CATEGORY = "image" + + def expand_image(self, image, left, top, right, bottom, feathering, mask=None): + if mask is not None: + if torch.allclose(mask, torch.zeros_like(mask)): + print("Warning: The incoming mask is fully black. 
Handling it as None.") + mask = None + B, H, W, C = image.size() + + new_image = torch.ones( + (B, H + top + bottom, W + left + right, C), + dtype=torch.float32, + ) * 0.5 + + new_image[:, top:top + H, left:left + W, :] = image + + if mask is None: + new_mask = torch.ones( + (B, H + top + bottom, W + left + right), + dtype=torch.float32, + ) + + t = torch.zeros( + (B, H, W), + dtype=torch.float32 + ) + else: + # If a mask is provided, pad it to fit the new image size + mask = F.pad(mask, (left, right, top, bottom), mode='constant', value=0) + mask = 1 - mask + t = torch.zeros_like(mask) + + if feathering > 0 and feathering * 2 < H and feathering * 2 < W: + + for i in range(H): + for j in range(W): + dt = i if top != 0 else H + db = H - i if bottom != 0 else H + + dl = j if left != 0 else W + dr = W - j if right != 0 else W + + d = min(dt, db, dl, dr) + + if d >= feathering: + continue + + v = (feathering - d) / feathering + + if mask is None: + t[:, i, j] = v * v + else: + t[:, top + i, left + j] = v * v + + if mask is None: + new_mask[:, top:top + H, left:left + W] = t + return (new_image, new_mask,) + else: + return (new_image, mask,) + +class ImagePadForOutpaintTargetSize: + upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"] + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "target_width": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "target_height": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "feathering": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "upscale_method": (s.upscale_methods,), + }, + "optional": { + "mask": ("MASK",), + } + } + + RETURN_TYPES = ("IMAGE", "MASK") + FUNCTION = "expand_image" + + CATEGORY = "image" + + def expand_image(self, image, target_width, target_height, feathering, upscale_method, mask=None): + B, H, W, C = image.size() + new_height = H + new_width = W + # Calculate the scaling factor while maintaining aspect ratio + scaling_factor = min(target_width / W, target_height / H) + + # Check if the image needs to be downscaled + if scaling_factor < 1: + image = image.movedim(-1,1) + # Calculate the new width and height after downscaling + new_width = int(W * scaling_factor) + new_height = int(H * scaling_factor) + + # Downscale the image + image_scaled = common_upscale(image, new_width, new_height, upscale_method, "disabled").movedim(1,-1) + if mask is not None: + mask_scaled = mask.unsqueeze(0) # Add an extra dimension for batch size + mask_scaled = F.interpolate(mask_scaled, size=(new_height, new_width), mode="nearest") + mask_scaled = mask_scaled.squeeze(0) # Remove the extra dimension after interpolation + else: + mask_scaled = mask + else: + # If downscaling is not needed, use the original image dimensions + image_scaled = image + mask_scaled = mask + + # Calculate how much padding is needed to reach the target dimensions + pad_top = max(0, (target_height - new_height) // 2) + pad_bottom = max(0, target_height - new_height - pad_top) + pad_left = max(0, (target_width - new_width) // 2) + pad_right = max(0, target_width - new_width - pad_left) + + # Now call the original expand_image with the calculated padding + return ImagePadForOutpaintMasked.expand_image(self, image_scaled, pad_left, pad_top, pad_right, pad_bottom, feathering, mask_scaled) + +class ImagePrepForICLora: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "reference_image": ("IMAGE",), + "output_width": ("INT", {"default": 1024, 
"min": 1, "max": 4096, "step": 1}), + "output_height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}), + "border_width": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 1}), + }, + "optional": { + "latent_image": ("IMAGE",), + "latent_mask": ("MASK",), + "reference_mask": ("MASK",), + } + } + + RETURN_TYPES = ("IMAGE", "MASK") + FUNCTION = "expand_image" + + CATEGORY = "image" + + def expand_image(self, reference_image, output_width, output_height, border_width, latent_image=None, reference_mask=None, latent_mask=None): + + if reference_mask is not None: + if torch.allclose(reference_mask, torch.zeros_like(reference_mask)): + print("Warning: The incoming mask is fully black. Handling it as None.") + reference_mask = None + image = reference_image + B, H, W, C = image.size() + + # Handle mask + if reference_mask is not None: + resized_mask = torch.nn.functional.interpolate( + reference_mask.unsqueeze(1), + size=(H, W), + mode='nearest' + ).squeeze(1) + print(resized_mask.shape) + image = image * resized_mask.unsqueeze(-1) + + # Calculate new width maintaining aspect ratio + new_width = int((W / H) * output_height) + + # Resize image to new height while maintaining aspect ratio + resized_image = common_upscale(image.movedim(-1,1), new_width, output_height, "lanczos", "disabled").movedim(1,-1) + + # Create padded image + if latent_image is None: + pad_image = torch.zeros((B, output_height, output_width, C), device=image.device) + else: + resized_latent_image = common_upscale(latent_image.movedim(-1,1), output_width, output_height, "lanczos", "disabled").movedim(1,-1) + pad_image = resized_latent_image + if latent_mask is not None: + resized_latent_mask = torch.nn.functional.interpolate( + latent_mask.unsqueeze(1), + size=(pad_image.shape[1], pad_image.shape[2]), + mode='nearest' + ).squeeze(1) + + if border_width > 0: + border = torch.zeros((B, output_height, border_width, C), device=image.device) + padded_image = torch.cat((resized_image, border, pad_image), dim=2) + if latent_mask is not None: + padded_mask = torch.zeros((B, padded_image.shape[1], padded_image.shape[2]), device=image.device) + padded_mask[:, :, (new_width + border_width):] = resized_latent_mask + else: + padded_mask = torch.ones((B, padded_image.shape[1], padded_image.shape[2]), device=image.device) + padded_mask[:, :, :new_width + border_width] = 0 + else: + padded_image = torch.cat((resized_image, pad_image), dim=2) + if latent_mask is not None: + padded_mask = torch.zeros((B, padded_image.shape[1], padded_image.shape[2]), device=image.device) + padded_mask[:, :, new_width:] = resized_latent_mask + else: + padded_mask = torch.ones((B, padded_image.shape[1], padded_image.shape[2]), device=image.device) + padded_mask[:, :, :new_width] = 0 + + return (padded_image, padded_mask) + + +class ImageAndMaskPreview(SaveImage): + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) + self.compress_level = 4 + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask_opacity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "mask_color": ("STRING", {"default": "255, 255, 255"}), + "pass_through": ("BOOLEAN", {"default": False}), + }, + "optional": { + "image": ("IMAGE",), + "mask": ("MASK",), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("composite",) + 
FUNCTION = "execute" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Preview an image or a mask, when both inputs are used +composites the mask on top of the image. +with pass_through on the preview is disabled and the +composite is returned from the composite slot instead, +this allows for the preview to be passed for video combine +nodes for example. +""" + + def execute(self, mask_opacity, mask_color, pass_through, filename_prefix="ComfyUI", image=None, mask=None, prompt=None, extra_pnginfo=None): + if mask is not None and image is None: + preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) + elif mask is None and image is not None: + preview = image + elif mask is not None and image is not None: + mask_adjusted = mask * mask_opacity + mask_image = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3).clone() + + if ',' in mask_color: + color_list = np.clip([int(channel) for channel in mask_color.split(',')], 0, 255) # RGB format + else: + mask_color = mask_color.lstrip('#') + color_list = [int(mask_color[i:i+2], 16) for i in (0, 2, 4)] # Hex format + mask_image[:, :, :, 0] = color_list[0] / 255 # Red channel + mask_image[:, :, :, 1] = color_list[1] / 255 # Green channel + mask_image[:, :, :, 2] = color_list[2] / 255 # Blue channel + + preview, = ImageCompositeMasked.composite(self, image, mask_image, 0, 0, True, mask_adjusted) + if pass_through: + return (preview, ) + return(self.save_images(preview, filename_prefix, prompt, extra_pnginfo)) + +class CrossFadeImages: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "crossfadeimages" + CATEGORY = "KJNodes/image" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images_1": ("IMAGE",), + "images_2": ("IMAGE",), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],), + "transition_start_index": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + "start_level": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 1.0, "step": 0.01}), + "end_level": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + def crossfadeimages(self, images_1, images_2, transition_start_index, transitioning_frames, interpolation, start_level, end_level): + + def crossfade(images_1, images_2, alpha): + crossfade = (1 - alpha) * images_1 + alpha * images_2 + return crossfade + def ease_in(t): + return t * t + def ease_out(t): + return 1 - (1 - t) * (1 - t) + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + def bounce(t): + if t < 0.5: + return self.ease_out(t * 2) * 0.5 + else: + return self.ease_in((t - 0.5) * 2) * 0.5 + 0.5 + def elastic(t): + return math.sin(13 * math.pi / 2 * t) * math.pow(2, 10 * (t - 1)) + def glitchy(t): + return t + 0.1 * math.sin(40 * t) + def exponential_ease_out(t): + return 1 - (1 - t) ** 4 + + easing_functions = { + "linear": lambda t: t, + "ease_in": ease_in, + "ease_out": ease_out, + "ease_in_out": ease_in_out, + "bounce": bounce, + "elastic": elastic, + "glitchy": glitchy, + "exponential_ease_out": exponential_ease_out, + } + + crossfade_images = [] + + alphas = torch.linspace(start_level, end_level, transitioning_frames) + for i in range(transitioning_frames): + alpha = alphas[i] + image1 = images_1[i + transition_start_index] + image2 = images_2[i + transition_start_index] + easing_function = easing_functions.get(interpolation) + alpha = 
easing_function(alpha) # Apply the easing function to the alpha value + + crossfade_image = crossfade(image1, image2, alpha) + crossfade_images.append(crossfade_image) + + # Convert crossfade_images to tensor + crossfade_images = torch.stack(crossfade_images, dim=0) + # Get the last frame result of the interpolation + last_frame = crossfade_images[-1] + # Calculate the number of remaining frames from images_2 + remaining_frames = len(images_2) - (transition_start_index + transitioning_frames) + # Crossfade the remaining frames with the last used alpha value + for i in range(remaining_frames): + alpha = alphas[-1] + image1 = images_1[i + transition_start_index + transitioning_frames] + image2 = images_2[i + transition_start_index + transitioning_frames] + easing_function = easing_functions.get(interpolation) + alpha = easing_function(alpha) # Apply the easing function to the alpha value + + crossfade_image = crossfade(image1, image2, alpha) + crossfade_images = torch.cat([crossfade_images, crossfade_image.unsqueeze(0)], dim=0) + # Append the beginning of images_1 + beginning_images_1 = images_1[:transition_start_index] + crossfade_images = torch.cat([beginning_images_1, crossfade_images], dim=0) + return (crossfade_images, ) + +class CrossFadeImagesMulti: + RETURN_TYPES = ("IMAGE",) + FUNCTION = "crossfadeimages" + CATEGORY = "KJNodes/image" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "image_1": ("IMAGE",), + "image_2": ("IMAGE",), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],), + "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + }, + } + + def crossfadeimages(self, inputcount, transitioning_frames, interpolation, **kwargs): + + def crossfade(images_1, images_2, alpha): + crossfade = (1 - alpha) * images_1 + alpha * images_2 + return crossfade + def ease_in(t): + return t * t + def ease_out(t): + return 1 - (1 - t) * (1 - t) + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + def bounce(t): + if t < 0.5: + return self.ease_out(t * 2) * 0.5 + else: + return self.ease_in((t - 0.5) * 2) * 0.5 + 0.5 + def elastic(t): + return math.sin(13 * math.pi / 2 * t) * math.pow(2, 10 * (t - 1)) + def glitchy(t): + return t + 0.1 * math.sin(40 * t) + def exponential_ease_out(t): + return 1 - (1 - t) ** 4 + + easing_functions = { + "linear": lambda t: t, + "ease_in": ease_in, + "ease_out": ease_out, + "ease_in_out": ease_in_out, + "bounce": bounce, + "elastic": elastic, + "glitchy": glitchy, + "exponential_ease_out": exponential_ease_out, + } + + image_1 = kwargs["image_1"] + height = image_1.shape[1] + width = image_1.shape[2] + + easing_function = easing_functions[interpolation] + + for c in range(1, inputcount): + frames = [] + new_image = kwargs[f"image_{c + 1}"] + new_image_height = new_image.shape[1] + new_image_width = new_image.shape[2] + + if new_image_height != height or new_image_width != width: + new_image = common_upscale(new_image.movedim(-1, 1), width, height, "lanczos", "disabled") + new_image = new_image.movedim(1, -1) # Move channels back to the last dimension + + last_frame_image_1 = image_1[-1] + first_frame_image_2 = new_image[0] + + for frame in range(transitioning_frames): + t = frame / (transitioning_frames - 1) + alpha = easing_function(t) + alpha_tensor = torch.tensor(alpha, dtype=last_frame_image_1.dtype, device=last_frame_image_1.device) + frame_image = 
crossfade(last_frame_image_1, first_frame_image_2, alpha_tensor) + frames.append(frame_image) + + frames = torch.stack(frames) + image_1 = torch.cat((image_1, frames, new_image), dim=0) + + return image_1, + +def transition_images(images_1, images_2, alpha, transition_type, blur_radius, reverse): + width = images_1.shape[1] + height = images_1.shape[0] + + mask = torch.zeros_like(images_1, device=images_1.device) + + alpha = alpha.item() + if reverse: + alpha = 1 - alpha + + #transitions from matteo's essential nodes + if "horizontal slide" in transition_type: + pos = round(width * alpha) + mask[:, :pos, :] = 1.0 + elif "vertical slide" in transition_type: + pos = round(height * alpha) + mask[:pos, :, :] = 1.0 + elif "box" in transition_type: + box_w = round(width * alpha) + box_h = round(height * alpha) + x1 = (width - box_w) // 2 + y1 = (height - box_h) // 2 + x2 = x1 + box_w + y2 = y1 + box_h + mask[y1:y2, x1:x2, :] = 1.0 + elif "circle" in transition_type: + radius = math.ceil(math.sqrt(pow(width, 2) + pow(height, 2)) * alpha / 2) + c_x = width // 2 + c_y = height // 2 + x = torch.arange(0, width, dtype=torch.float32, device="cpu") + y = torch.arange(0, height, dtype=torch.float32, device="cpu") + y, x = torch.meshgrid((y, x), indexing="ij") + circle = ((x - c_x) ** 2 + (y - c_y) ** 2) <= (radius ** 2) + mask[circle] = 1.0 + elif "horizontal door" in transition_type: + bar = math.ceil(height * alpha / 2) + if bar > 0: + mask[:bar, :, :] = 1.0 + mask[-bar:,:, :] = 1.0 + elif "vertical door" in transition_type: + bar = math.ceil(width * alpha / 2) + if bar > 0: + mask[:, :bar,:] = 1.0 + mask[:, -bar:,:] = 1.0 + elif "fade" in transition_type: + mask[:, :, :] = alpha + + mask = gaussian_blur(mask, blur_radius) + + return images_1 * (1 - mask) + images_2 * mask + +def ease_in(t): + return t * t +def ease_out(t): + return 1 - (1 - t) * (1 - t) +def ease_in_out(t): + return 3 * t * t - 2 * t * t * t +def bounce(t): + if t < 0.5: + return ease_out(t * 2) * 0.5 + else: + return ease_in((t - 0.5) * 2) * 0.5 + 0.5 +def elastic(t): + return math.sin(13 * math.pi / 2 * t) * math.pow(2, 10 * (t - 1)) +def glitchy(t): + return t + 0.1 * math.sin(40 * t) +def exponential_ease_out(t): + return 1 - (1 - t) ** 4 + +def gaussian_blur(mask, blur_radius): + if blur_radius > 0: + kernel_size = int(blur_radius * 2) + 1 + if kernel_size % 2 == 0: + kernel_size += 1 # Ensure kernel size is odd + sigma = blur_radius / 3 + x = torch.arange(-kernel_size // 2 + 1, kernel_size // 2 + 1, dtype=torch.float32) + x = torch.exp(-0.5 * (x / sigma) ** 2) + kernel1d = x / x.sum() + kernel2d = kernel1d[:, None] * kernel1d[None, :] + kernel2d = kernel2d.to(mask.device) + kernel2d = kernel2d.expand(mask.shape[2], 1, kernel2d.shape[0], kernel2d.shape[1]) + mask = mask.permute(2, 0, 1).unsqueeze(0) # Change to [C, H, W] and add batch dimension + mask = F.conv2d(mask, kernel2d, padding=kernel_size // 2, groups=mask.shape[1]) + mask = mask.squeeze(0).permute(1, 2, 0) # Change back to [H, W, C] + return mask + +easing_functions = { + "linear": lambda t: t, + "ease_in": ease_in, + "ease_out": ease_out, + "ease_in_out": ease_in_out, + "bounce": bounce, + "elastic": elastic, + "glitchy": glitchy, + "exponential_ease_out": exponential_ease_out, +} + +class TransitionImagesMulti: + RETURN_TYPES = ("IMAGE",) + FUNCTION = "transition" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates transitions between images. 
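+Each transition runs from the last frame of the previous input to the first frame
+of the next one, using the selected transition type and easing.
+transitioning_frames should be at least 2, since the blend factor is computed as
+frame / (transitioning_frames - 1).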
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "image_1": ("IMAGE",), + "image_2": ("IMAGE",), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],), + "transition_type": (["horizontal slide", "vertical slide", "box", "circle", "horizontal door", "vertical door", "fade"],), + "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + "blur_radius": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 100.0, "step": 0.1}), + "reverse": ("BOOLEAN", {"default": False}), + "device": (["CPU", "GPU"], {"default": "CPU"}), + }, + } + + def transition(self, inputcount, transitioning_frames, transition_type, interpolation, device, blur_radius, reverse, **kwargs): + + gpu = model_management.get_torch_device() + + image_1 = kwargs["image_1"] + height = image_1.shape[1] + width = image_1.shape[2] + + easing_function = easing_functions[interpolation] + + for c in range(1, inputcount): + frames = [] + new_image = kwargs[f"image_{c + 1}"] + new_image_height = new_image.shape[1] + new_image_width = new_image.shape[2] + + if new_image_height != height or new_image_width != width: + new_image = common_upscale(new_image.movedim(-1, 1), width, height, "lanczos", "disabled") + new_image = new_image.movedim(1, -1) # Move channels back to the last dimension + + last_frame_image_1 = image_1[-1] + first_frame_image_2 = new_image[0] + if device == "GPU": + last_frame_image_1 = last_frame_image_1.to(gpu) + first_frame_image_2 = first_frame_image_2.to(gpu) + + if reverse: + last_frame_image_1, first_frame_image_2 = first_frame_image_2, last_frame_image_1 + + for frame in range(transitioning_frames): + t = frame / (transitioning_frames - 1) + alpha = easing_function(t) + alpha_tensor = torch.tensor(alpha, dtype=last_frame_image_1.dtype, device=last_frame_image_1.device) + frame_image = transition_images(last_frame_image_1, first_frame_image_2, alpha_tensor, transition_type, blur_radius, reverse) + frames.append(frame_image) + + frames = torch.stack(frames).cpu() + image_1 = torch.cat((image_1, frames, new_image), dim=0) + + return image_1.cpu(), + +class TransitionImagesInBatch: + RETURN_TYPES = ("IMAGE",) + FUNCTION = "transition" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates transitions between images in a batch. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],), + "transition_type": (["horizontal slide", "vertical slide", "box", "circle", "horizontal door", "vertical door", "fade"],), + "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + "blur_radius": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 100.0, "step": 0.1}), + "reverse": ("BOOLEAN", {"default": False}), + "device": (["CPU", "GPU"], {"default": "CPU"}), + }, + } + + #transitions from matteo's essential nodes + def transition(self, images, transitioning_frames, transition_type, interpolation, device, blur_radius, reverse): + if images.shape[0] == 1: + return images, + + gpu = model_management.get_torch_device() + + easing_function = easing_functions[interpolation] + + images_list = [] + pbar = ProgressBar(images.shape[0] - 1) + for i in range(images.shape[0] - 1): + frames = [] + image_1 = images[i] + image_2 = images[i + 1] + + if device == "GPU": + image_1 = image_1.to(gpu) + image_2 = image_2.to(gpu) + + if reverse: + image_1, image_2 = image_2, image_1 + + for frame in range(transitioning_frames): + t = frame / (transitioning_frames - 1) + alpha = easing_function(t) + alpha_tensor = torch.tensor(alpha, dtype=image_1.dtype, device=image_1.device) + frame_image = transition_images(image_1, image_2, alpha_tensor, transition_type, blur_radius, reverse) + frames.append(frame_image) + pbar.update(1) + + frames = torch.stack(frames).cpu() + images_list.append(frames) + images = torch.cat(images_list, dim=0) + + return images.cpu(), + +class ShuffleImageBatch: + RETURN_TYPES = ("IMAGE",) + FUNCTION = "shuffle" + CATEGORY = "KJNodes/image" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), + }, + } + + def shuffle(self, images, seed): + torch.manual_seed(seed) + B, H, W, C = images.shape + indices = torch.randperm(B) + shuffled_images = images[indices] + + return shuffled_images, + +class GetImageRangeFromBatch: + + RETURN_TYPES = ("IMAGE", "MASK", ) + FUNCTION = "imagesfrombatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Returns a range of images from a batch. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "start_index": ("INT", {"default": 0,"min": -1, "max": 4096, "step": 1}), + "num_frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), + }, + "optional": { + "images": ("IMAGE",), + "masks": ("MASK",), + } + } + + def imagesfrombatch(self, start_index, num_frames, images=None, masks=None): + chosen_images = None + chosen_masks = None + + # Process images if provided + if images is not None: + if start_index == -1: + start_index = max(0, len(images) - num_frames) + if start_index < 0 or start_index >= len(images): + raise ValueError("Start index is out of range") + end_index = min(start_index + num_frames, len(images)) + chosen_images = images[start_index:end_index] + + # Process masks if provided + if masks is not None: + if start_index == -1: + start_index = max(0, len(masks) - num_frames) + if start_index < 0 or start_index >= len(masks): + raise ValueError("Start index is out of range for masks") + end_index = min(start_index + num_frames, len(masks)) + chosen_masks = masks[start_index:end_index] + + return (chosen_images, chosen_masks,) + +class GetLatentRangeFromBatch: + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "latentsfrombatch" + CATEGORY = "KJNodes/latents" + DESCRIPTION = """ +Returns a range of latents from a batch. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "latents": ("LATENT",), + "start_index": ("INT", {"default": 0,"min": -1, "max": 4096, "step": 1}), + "num_frames": ("INT", {"default": 1,"min": -1, "max": 4096, "step": 1}), + }, + } + + def latentsfrombatch(self, latents, start_index, num_frames): + chosen_latents = None + samples = latents["samples"] + if len(samples.shape) == 4: + B, C, H, W = samples.shape + num_latents = B + elif len(samples.shape) == 5: + B, C, T, H, W = samples.shape + num_latents = T + + if start_index == -1: + start_index = max(0, num_latents - num_frames) + if start_index < 0 or start_index >= num_latents: + raise ValueError("Start index is out of range") + + end_index = num_latents if num_frames == -1 else min(start_index + num_frames, num_latents) + + if len(samples.shape) == 4: + chosen_latents = samples[start_index:end_index] + elif len(samples.shape) == 5: + chosen_latents = samples[:, :, start_index:end_index] + + return ({"samples": chosen_latents,},) + +class InsertLatentToIndex: + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "insert" + CATEGORY = "KJNodes/latents" + DESCRIPTION = """ +Inserts a latent at the specified index into the original latent batch. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "source": ("LATENT",), + "destination": ("LATENT",), + "index": ("INT", {"default": 0,"min": -1, "max": 4096, "step": 1}), + }, + } + + def insert(self, source, destination, index): + samples_destination = destination["samples"] + samples_source = source["samples"].to(samples_destination) + + if len(samples_source.shape) == 4: + B, C, H, W = samples_source.shape + num_latents = B + elif len(samples_source.shape) == 5: + B, C, T, H, W = samples_source.shape + num_latents = T + + if index >= num_latents or index < 0: + raise ValueError(f"Index {index} out of bounds for tensor with {num_latents} latents") + + if len(samples_source.shape) == 4: + joined_latents = torch.cat([ + samples_destination[:index], + samples_source, + samples_destination[index+1:] + ], dim=0) + else: + joined_latents = torch.cat([ + samples_destination[:, :, :index], + samples_source, + samples_destination[:, :, index+1:] + ], dim=2) + + return ({"samples": joined_latents,},) + +class GetImagesFromBatchIndexed: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "indexedimagesfrombatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Selects and returns the images at the specified indices as an image batch. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), + }, + } + + def indexedimagesfrombatch(self, images, indexes): + + # Parse the indexes string into a list of integers + index_list = [int(index.strip()) for index in indexes.split(',')] + + # Convert list of indices to a PyTorch tensor + indices_tensor = torch.tensor(index_list, dtype=torch.long) + + # Select the images at the specified indices + chosen_images = images[indices_tensor] + + return (chosen_images,) + +class InsertImagesToBatchIndexed: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "insertimagesfrombatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Inserts images at the specified indices into the original image batch. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "original_images": ("IMAGE",), + "images_to_insert": ("IMAGE",), + "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), + }, + } + + def insertimagesfrombatch(self, original_images, images_to_insert, indexes): + + # Parse the indexes string into a list of integers + index_list = [int(index.strip()) for index in indexes.split(',')] + + # Convert list of indices to a PyTorch tensor + indices_tensor = torch.tensor(index_list, dtype=torch.long) + + # Ensure the images_to_insert is a tensor + if not isinstance(images_to_insert, torch.Tensor): + images_to_insert = torch.tensor(images_to_insert) + + # Insert the images at the specified indices + for index, image in zip(indices_tensor, images_to_insert): + original_images[index] = image + + return (original_images,) + +class ReplaceImagesInBatch: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "replace" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Replaces the images in a batch, starting from the specified start index, +with the replacement images. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "original_images": ("IMAGE",), + "replacement_images": ("IMAGE",), + "start_index": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + }, + } + + def replace(self, original_images, replacement_images, start_index): + images = None + if start_index >= len(original_images): + raise ValueError("GetImageRangeFromBatch: Start index is out of range") + end_index = start_index + len(replacement_images) + if end_index > len(original_images): + raise ValueError("GetImageRangeFromBatch: End index is out of range") + # Create a copy of the original_images tensor + original_images_copy = original_images.clone() + original_images_copy[start_index:end_index] = replacement_images + images = original_images_copy + return (images, ) + + +class ReverseImageBatch: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "reverseimagebatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Reverses the order of the images in a batch. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + }, + } + + def reverseimagebatch(self, images): + reversed_images = torch.flip(images, [0]) + return (reversed_images, ) + +class ImageBatchMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "image_1": ("IMAGE", ), + "image_2": ("IMAGE", ), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "combine" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates an image batch from multiple images. +You can set how many inputs the node has, +with the **inputcount** and clicking update. +""" + + def combine(self, inputcount, **kwargs): + from nodes import ImageBatch + image_batch_node = ImageBatch() + image = kwargs["image_1"] + for c in range(1, inputcount): + new_image = kwargs[f"image_{c + 1}"] + image, = image_batch_node.batch(image, new_image) + return (image,) + + +class ImageTensorList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE",) + #OUTPUT_IS_LIST = (True,) + FUNCTION = "append" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates an image list from the input images. +""" + + def append(self, image1, image2): + image_list = [] + if isinstance(image1, torch.Tensor) and isinstance(image2, torch.Tensor): + image_list = [image1, image2] + elif isinstance(image1, list) and isinstance(image2, torch.Tensor): + image_list = image1 + [image2] + elif isinstance(image1, torch.Tensor) and isinstance(image2, list): + image_list = [image1] + image2 + elif isinstance(image1, list) and isinstance(image2, list): + image_list = image1 + image2 + return image_list, + +class ImageAddMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "image_1": ("IMAGE", ), + "image_2": ("IMAGE", ), + "blending": ( + [ 'add', + 'subtract', + 'multiply', + 'difference', + ], + { + "default": 'add' + }), + "blend_amount": ("FLOAT", {"default": 0.5, "min": 0, "max": 1, "step": 0.01}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "add" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Add blends multiple images together. +You can set how many inputs the node has, +with the **inputcount** and clicking update. 
+""" + + def add(self, inputcount, blending, blend_amount, **kwargs): + image = kwargs["image_1"] + for c in range(1, inputcount): + new_image = kwargs[f"image_{c + 1}"] + if blending == "add": + image = torch.add(image * blend_amount, new_image * blend_amount) + elif blending == "subtract": + image = torch.sub(image * blend_amount, new_image * blend_amount) + elif blending == "multiply": + image = torch.mul(image * blend_amount, new_image * blend_amount) + elif blending == "difference": + image = torch.sub(image, new_image) + return (image,) + +class ImageConcatMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "image_1": ("IMAGE", ), + "image_2": ("IMAGE", ), + "direction": ( + [ 'right', + 'down', + 'left', + 'up', + ], + { + "default": 'right' + }), + "match_image_size": ("BOOLEAN", {"default": False}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "combine" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates an image from multiple images. +You can set how many inputs the node has, +with the **inputcount** and clicking update. +""" + + def combine(self, inputcount, direction, match_image_size, **kwargs): + image = kwargs["image_1"] + first_image_shape = None + if first_image_shape is None: + first_image_shape = image.shape + for c in range(1, inputcount): + new_image = kwargs[f"image_{c + 1}"] + image, = ImageConcanate.concatenate(self, image, new_image, direction, match_image_size, first_image_shape=first_image_shape) + first_image_shape = None + return (image,) + +class PreviewAnimation: + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) + self.compress_level = 1 + + methods = {"default": 4, "fastest": 0, "slowest": 6} + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "fps": ("FLOAT", {"default": 8.0, "min": 0.01, "max": 1000.0, "step": 0.01}), + }, + "optional": { + "images": ("IMAGE", ), + "masks": ("MASK", ), + }, + } + + RETURN_TYPES = () + FUNCTION = "preview" + OUTPUT_NODE = True + CATEGORY = "KJNodes/image" + + def preview(self, fps, images=None, masks=None): + filename_prefix = "AnimPreview" + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) + results = list() + + pil_images = [] + + if images is not None and masks is not None: + for image in images: + i = 255. * image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + pil_images.append(img) + for mask in masks: + if pil_images: + mask_np = mask.cpu().numpy() + mask_np = np.clip(mask_np * 255, 0, 255).astype(np.uint8) # Convert to values between 0 and 255 + mask_img = Image.fromarray(mask_np, mode='L') + img = pil_images.pop(0) # Remove and get the first image + img = img.convert("RGBA") # Convert base image to RGBA + + # Create a new RGBA image based on the grayscale mask + rgba_mask_img = Image.new("RGBA", img.size, (255, 255, 255, 255)) + rgba_mask_img.putalpha(mask_img) # Use the mask image as the alpha channel + + # Composite the RGBA mask onto the base image + composited_img = Image.alpha_composite(img, rgba_mask_img) + pil_images.append(composited_img) # Add the composited image back + + elif images is not None and masks is None: + for image in images: + i = 255. 
* image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + pil_images.append(img) + + elif masks is not None and images is None: + for mask in masks: + mask_np = 255. * mask.cpu().numpy() + mask_img = Image.fromarray(np.clip(mask_np, 0, 255).astype(np.uint8)) + pil_images.append(mask_img) + else: + print("PreviewAnimation: No images or masks provided") + return { "ui": { "images": results, "animated": (None,), "text": "empty" }} + + num_frames = len(pil_images) + + c = len(pil_images) + for i in range(0, c, num_frames): + file = f"{filename}_{counter:05}_.webp" + pil_images[i].save(os.path.join(full_output_folder, file), save_all=True, duration=int(1000.0/fps), append_images=pil_images[i + 1:i + num_frames], lossless=False, quality=80, method=4) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + counter += 1 + + animated = num_frames != 1 + return { "ui": { "images": results, "animated": (animated,), "text": [f"{num_frames}x{pil_images[0].size[0]}x{pil_images[0].size[1]}"] } } + +class ImageResizeKJ: + upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"] + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "upscale_method": (s.upscale_methods,), + "keep_proportion": ("BOOLEAN", { "default": False }), + "divisible_by": ("INT", { "default": 2, "min": 0, "max": 512, "step": 1, }), + }, + "optional" : { + "width_input": ("INT", { "forceInput": True}), + "height_input": ("INT", { "forceInput": True}), + "get_image_size": ("IMAGE",), + "crop": (["disabled","center"],), + } + } + + RETURN_TYPES = ("IMAGE", "INT", "INT",) + RETURN_NAMES = ("IMAGE", "width", "height",) + FUNCTION = "resize" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Resizes the image to the specified width and height. +Size can be retrieved from the inputs, and the final scale +is determined in this order of importance: +- get_image_size +- width_input and height_input +- width and height widgets + +Keep proportions keeps the aspect ratio of the image, by +highest dimension. 
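+When both width and height are set, the scale factor is min(width / W, height / H),
+so the resized image fits within the requested size.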
+""" + + def resize(self, image, width, height, keep_proportion, upscale_method, divisible_by, + width_input=None, height_input=None, get_image_size=None, crop="disabled"): + B, H, W, C = image.shape + + if width_input: + width = width_input + if height_input: + height = height_input + if get_image_size is not None: + _, height, width, _ = get_image_size.shape + + if keep_proportion and get_image_size is None: + # If one of the dimensions is zero, calculate it to maintain the aspect ratio + if width == 0 and height != 0: + ratio = height / H + width = round(W * ratio) + elif height == 0 and width != 0: + ratio = width / W + height = round(H * ratio) + elif width != 0 and height != 0: + # Scale based on which dimension is smaller in proportion to the desired dimensions + ratio = min(width / W, height / H) + width = round(W * ratio) + height = round(H * ratio) + else: + if width == 0: + width = W + if height == 0: + height = H + + if divisible_by > 1 and get_image_size is None: + width = width - (width % divisible_by) + height = height - (height % divisible_by) + + image = image.movedim(-1,1) + image = common_upscale(image, width, height, upscale_method, crop) + image = image.movedim(1,-1) + + return(image, image.shape[2], image.shape[1],) +import pathlib +class LoadAndResizeImage: + _color_channels = ["alpha", "red", "green", "blue"] + @classmethod + def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() + files = [f.name for f in pathlib.Path(input_dir).iterdir() if f.is_file()] + return {"required": + { + "image": (sorted(files), {"image_upload": True}), + "resize": ("BOOLEAN", { "default": False }), + "width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "repeat": ("INT", { "default": 1, "min": 1, "max": 4096, "step": 1, }), + "keep_proportion": ("BOOLEAN", { "default": False }), + "divisible_by": ("INT", { "default": 2, "min": 0, "max": 512, "step": 1, }), + "mask_channel": (s._color_channels, {"tooltip": "Channel to use for the mask output"}), + "background_color": ("STRING", { "default": "", "tooltip": "Fills the alpha channel with the specified color."}), + }, + } + + CATEGORY = "KJNodes/image" + RETURN_TYPES = ("IMAGE", "MASK", "INT", "INT", "STRING",) + RETURN_NAMES = ("image", "mask", "width", "height","image_path",) + FUNCTION = "load_image" + + def load_image(self, image, resize, width, height, repeat, keep_proportion, divisible_by, mask_channel, background_color): + from PIL import ImageColor, Image, ImageOps, ImageSequence + import numpy as np + import torch + image_path = folder_paths.get_annotated_filepath(image) + + import node_helpers + img = node_helpers.pillow(Image.open, image_path) + + # Process the background_color + if background_color: + try: + # Try to parse as RGB tuple + bg_color_rgba = tuple(int(x.strip()) for x in background_color.split(',')) + except ValueError: + # If parsing fails, it might be a hex color or named color + if background_color.startswith('#') or background_color.lower() in ImageColor.colormap: + bg_color_rgba = ImageColor.getrgb(background_color) + else: + raise ValueError(f"Invalid background color: {background_color}") + + bg_color_rgba += (255,) # Add alpha channel + else: + bg_color_rgba = None # No background color specified + + output_images = [] + output_masks = [] + w, h = None, None + + excluded_formats = ['MPO'] + + W, H = img.size + if resize: + if keep_proportion: + ratio = min(width / W, height / H) + 
width = round(W * ratio) + height = round(H * ratio) + else: + if width == 0: + width = W + if height == 0: + height = H + + if divisible_by > 1: + width = width - (width % divisible_by) + height = height - (height % divisible_by) + else: + width, height = W, H + + for frame in ImageSequence.Iterator(img): + frame = node_helpers.pillow(ImageOps.exif_transpose, frame) + + if frame.mode == 'I': + frame = frame.point(lambda i: i * (1 / 255)) + + if frame.mode == 'P': + frame = frame.convert("RGBA") + elif 'A' in frame.getbands(): + frame = frame.convert("RGBA") + + # Extract alpha channel if it exists + if 'A' in frame.getbands() and bg_color_rgba: + alpha_mask = np.array(frame.getchannel('A')).astype(np.float32) / 255.0 + alpha_mask = 1. - torch.from_numpy(alpha_mask) + bg_image = Image.new("RGBA", frame.size, bg_color_rgba) + # Composite the frame onto the background + frame = Image.alpha_composite(bg_image, frame) + else: + alpha_mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + + image = frame.convert("RGB") + + if len(output_images) == 0: + w = image.size[0] + h = image.size[1] + + if image.size[0] != w or image.size[1] != h: + continue + if resize: + image = image.resize((width, height), Image.Resampling.BILINEAR) + + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + + c = mask_channel[0].upper() + if c in frame.getbands(): + if resize: + frame = frame.resize((width, height), Image.Resampling.BILINEAR) + mask = np.array(frame.getchannel(c)).astype(np.float32) / 255.0 + mask = torch.from_numpy(mask) + if c == 'A' and bg_color_rgba: + mask = alpha_mask + elif c == 'A': + mask = 1. - mask + else: + mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") + + output_images.append(image) + output_masks.append(mask.unsqueeze(0)) + + if len(output_images) > 1 and img.format not in excluded_formats: + output_image = torch.cat(output_images, dim=0) + output_mask = torch.cat(output_masks, dim=0) + else: + output_image = output_images[0] + output_mask = output_masks[0] + if repeat > 1: + output_image = output_image.repeat(repeat, 1, 1, 1) + output_mask = output_mask.repeat(repeat, 1, 1) + + return (output_image, output_mask, width, height, image_path) + + + # @classmethod + # def IS_CHANGED(s, image, **kwargs): + # image_path = folder_paths.get_annotated_filepath(image) + # m = hashlib.sha256() + # with open(image_path, 'rb') as f: + # m.update(f.read()) + # return m.digest().hex() + + @classmethod + def VALIDATE_INPUTS(s, image): + if not folder_paths.exists_annotated_filepath(image): + return "Invalid image file: {}".format(image) + + return True + +class LoadImagesFromFolderKJ: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "folder": ("STRING", {"default": ""}), + "width": ("INT", {"default": 1024, "min": 64, "step": 1}), + "height": ("INT", {"default": 1024, "min": 64, "step": 1}), + "keep_aspect_ratio": (["crop", "pad", "stretch",],), + }, + "optional": { + "image_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}), + "start_index": ("INT", {"default": 0, "min": 0, "step": 1}), + "include_subfolders": ("BOOLEAN", {"default": False}), + } + } + + RETURN_TYPES = ("IMAGE", "MASK", "INT", "STRING",) + RETURN_NAMES = ("image", "mask", "count", "image_path",) + FUNCTION = "load_images" + CATEGORY = "KJNodes/image" + DESCRIPTION = """Loads images from a folder into a batch, images are resized and loaded into a batch.""" + + def load_images(self, folder, width, height, image_load_cap, start_index, 
keep_aspect_ratio, include_subfolders=False): + if not os.path.isdir(folder): + raise FileNotFoundError(f"Folder '{folder} cannot be found.'") + + valid_extensions = ['.jpg', '.jpeg', '.png', '.webp'] + image_paths = [] + if include_subfolders: + for root, _, files in os.walk(folder): + for file in files: + if any(file.lower().endswith(ext) for ext in valid_extensions): + image_paths.append(os.path.join(root, file)) + else: + for file in os.listdir(folder): + if any(file.lower().endswith(ext) for ext in valid_extensions): + image_paths.append(os.path.join(folder, file)) + + dir_files = sorted(image_paths) + + if len(dir_files) == 0: + raise FileNotFoundError(f"No files in directory '{folder}'.") + + # start at start_index + dir_files = dir_files[start_index:] + + images = [] + masks = [] + image_path_list = [] + + limit_images = False + if image_load_cap > 0: + limit_images = True + image_count = 0 + + for image_path in dir_files: + if os.path.isdir(image_path): + continue + if limit_images and image_count >= image_load_cap: + break + i = Image.open(image_path) + i = ImageOps.exif_transpose(i) + + # Resize image to maximum dimensions + if i.size != (width, height): + i = self.resize_with_aspect_ratio(i, width, height, keep_aspect_ratio) + + + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. - torch.from_numpy(mask) + if mask.shape != (height, width): + mask = torch.nn.functional.interpolate(mask.unsqueeze(0).unsqueeze(0), + size=(height, width), + mode='bilinear', + align_corners=False).squeeze() + else: + mask = torch.zeros((height, width), dtype=torch.float32, device="cpu") + + images.append(image) + masks.append(mask) + image_path_list.append(image_path) + image_count += 1 + + if len(images) == 1: + return (images[0], masks[0], 1, image_path_list) + + elif len(images) > 1: + image1 = images[0] + mask1 = masks[0].unsqueeze(0) + + for image2 in images[1:]: + image1 = torch.cat((image1, image2), dim=0) + + for mask2 in masks[1:]: + mask1 = torch.cat((mask1, mask2.unsqueeze(0)), dim=0) + + return (image1, mask1, len(images), image_path_list) + def resize_with_aspect_ratio(self, img, width, height, mode): + if mode == "stretch": + return img.resize((width, height), Image.Resampling.LANCZOS) + + img_width, img_height = img.size + aspect_ratio = img_width / img_height + target_ratio = width / height + + if mode == "crop": + # Calculate dimensions for center crop + if aspect_ratio > target_ratio: + # Image is wider - crop width + new_width = int(height * aspect_ratio) + img = img.resize((new_width, height), Image.Resampling.LANCZOS) + left = (new_width - width) // 2 + return img.crop((left, 0, left + width, height)) + else: + # Image is taller - crop height + new_height = int(width / aspect_ratio) + img = img.resize((width, new_height), Image.Resampling.LANCZOS) + top = (new_height - height) // 2 + return img.crop((0, top, width, top + height)) + + elif mode == "pad": + pad_color = self.get_edge_color(img) + # Calculate dimensions for padding + if aspect_ratio > target_ratio: + # Image is wider - pad height + new_height = int(width / aspect_ratio) + img = img.resize((width, new_height), Image.Resampling.LANCZOS) + padding = (height - new_height) // 2 + padded = Image.new('RGBA', (width, height), pad_color) + padded.paste(img, (0, padding)) + return padded + else: + # Image is taller - pad width + new_width = int(height * 
aspect_ratio)
+                img = img.resize((new_width, height), Image.Resampling.LANCZOS)
+                padding = (width - new_width) // 2
+                padded = Image.new('RGBA', (width, height), pad_color)
+                padded.paste(img, (padding, 0))
+                return padded
+
+    def get_edge_color(self, img):
+        """Sample the image edges and return their median color."""
+        from PIL import ImageStat
+        width, height = img.size
+        img = img.convert('RGBA')
+
+        # Create 1-pixel high/wide images from edges
+        top = img.crop((0, 0, width, 1))
+        bottom = img.crop((0, height-1, width, height))
+        left = img.crop((0, 0, 1, height))
+        right = img.crop((width-1, 0, width, height))
+
+        # Combine edges into single image
+        edges = Image.new('RGBA', (width*2 + height*2, 1))
+        edges.paste(top, (0, 0))
+        edges.paste(bottom, (width, 0))
+        edges.paste(left.resize((height, 1)), (width*2, 0))
+        edges.paste(right.resize((height, 1)), (width*2 + height, 0))
+
+        # Get median color
+        stat = ImageStat.Stat(edges)
+        median = tuple(map(int, stat.median))
+        return median
+
+
+class ImageGridtoBatch:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "image": ("IMAGE", ),
+            "columns": ("INT", {"default": 3, "min": 1, "max": 8, "tooltip": "The number of columns in the grid."}),
+            "rows": ("INT", {"default": 0, "min": 0, "max": 8, "tooltip": "The number of rows in the grid. Set to 0 for automatic calculation."}),
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "decompose"
+    CATEGORY = "KJNodes/image"
+    DESCRIPTION = "Converts a grid of images to a batch of images."
+
+    def decompose(self, image, columns, rows):
+        B, H, W, C = image.shape
+        print("input size: ", image.shape)
+
+        # Calculate cell width, rounding down
+        cell_width = W // columns
+
+        if rows == 0:
+            # If rows is 0, assume square cells and calculate the number of full rows
+            cell_height = cell_width
+            rows = H // cell_height
+        else:
+            # If rows is specified, derive cell_height from it
+            cell_height = H // rows
+
+        # Crop the image to fit full cells
+        image = image[:, :rows*cell_height, :columns*cell_width, :]
+
+        # Reshape and permute the image to get the grid
+        image = image.view(B, rows, cell_height, columns, cell_width, C)
+        image = image.permute(0, 1, 3, 2, 4, 5).contiguous()
+        image = image.view(B, rows * columns, cell_height, cell_width, C)
+
+        # Reshape to the final batch tensor
+        img_tensor = image.view(-1, cell_height, cell_width, C)
+
+        return (img_tensor,)
+
+class SaveImageKJ:
+    def __init__(self):
+        self.type = "output"
+        self.prefix_append = ""
+        self.compress_level = 4
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "images": ("IMAGE", {"tooltip": "The images to save."}),
+                "filename_prefix": ("STRING", {"default": "ComfyUI", "tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}),
+                "output_folder": ("STRING", {"default": "output", "tooltip": "The folder to save the images to."}),
+            },
+            "optional": {
+                "caption_file_extension": ("STRING", {"default": ".txt", "tooltip": "The extension for the caption file."}),
+                "caption": ("STRING", {"forceInput": True, "tooltip": "string to save as .txt file"}),
+            },
+            "hidden": {
+                "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"
+            },
+        }
+
+    RETURN_TYPES = ("STRING",)
+    RETURN_NAMES = ("filename",)
+    FUNCTION = "save_images"
+
+    OUTPUT_NODE = True
+
+    CATEGORY = "KJNodes/image"
+    DESCRIPTION = "Saves the input images to your ComfyUI output directory."
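+    # An absolute output_folder is used as-is (created if missing); otherwise images are saved to the ComfyUI output directory.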
+ + def save_images(self, images, output_folder, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None, caption=None, caption_file_extension=".txt"): + filename_prefix += self.prefix_append + + if os.path.isabs(output_folder): + if not os.path.exists(output_folder): + os.makedirs(output_folder, exist_ok=True) + full_output_folder = output_folder + _, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, output_folder, images[0].shape[1], images[0].shape[0]) + else: + self.output_dir = folder_paths.get_output_directory() + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) + + results = list() + for (batch_number, image) in enumerate(images): + i = 255. * image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + metadata = None + if not args.disable_metadata: + metadata = PngInfo() + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + + filename_with_batch_num = filename.replace("%batch_num%", str(batch_number)) + base_file_name = f"{filename_with_batch_num}_{counter:05}_" + file = f"{base_file_name}.png" + img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + if caption is not None: + txt_file = base_file_name + caption_file_extension + file_path = os.path.join(full_output_folder, txt_file) + with open(file_path, 'w') as f: + f.write(caption) + + counter += 1 + + return file, + +class SaveStringKJ: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + self.prefix_append = "" + self.compress_level = 4 + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "string": ("STRING", {"forceInput": True, "tooltip": "string to save as .txt file"}), + "filename_prefix": ("STRING", {"default": "text", "tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."}), + "output_folder": ("STRING", {"default": "output", "tooltip": "The folder to save the images to."}), + }, + "optional": { + "file_extension": ("STRING", {"default": ".txt", "tooltip": "The extension for the caption file."}), + }, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("filename",) + FUNCTION = "save_string" + + OUTPUT_NODE = True + + CATEGORY = "KJNodes/misc" + DESCRIPTION = "Saves the input string to your ComfyUI output directory." 
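+    # Note: the filename counter is derived from the default output directory, even when output_folder points elsewhere.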
+ + def save_string(self, string, output_folder, filename_prefix="text", file_extension=".txt"): + filename_prefix += self.prefix_append + + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) + if output_folder != "output": + if not os.path.exists(output_folder): + os.makedirs(output_folder, exist_ok=True) + full_output_folder = output_folder + + base_file_name = f"{filename_prefix}_{counter:05}_" + results = list() + + txt_file = base_file_name + file_extension + file_path = os.path.join(full_output_folder, txt_file) + with open(file_path, 'w') as f: + f.write(string) + + return results, + +to_pil_image = T.ToPILImage() + +class FastPreview: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE", ), + "format": (["JPEG", "PNG", "WEBP"], {"default": "JPEG"}), + "quality" : ("INT", {"default": 75, "min": 1, "max": 100, "step": 1}), + }, + } + + RETURN_TYPES = () + FUNCTION = "preview" + CATEGORY = "KJNodes/experimental" + OUTPUT_NODE = True + DESCRIPTION = "Experimental node for faster image previews by displaying through base64 it without saving to disk." + + def preview(self, image, format, quality): + pil_image = to_pil_image(image[0].permute(2, 0, 1)) + + with io.BytesIO() as buffered: + pil_image.save(buffered, format=format, quality=quality) + img_bytes = buffered.getvalue() + + img_base64 = base64.b64encode(img_bytes).decode('utf-8') + + return { + "ui": {"bg_image": [img_base64]}, + "result": () + } + +class ImageCropByMaskAndResize: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE", ), + "mask": ("MASK", ), + "base_resolution": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "padding": ("INT", { "default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "min_crop_resolution": ("INT", { "default": 128, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "max_crop_resolution": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + + }, + } + + RETURN_TYPES = ("IMAGE", "MASK", "BBOX", ) + RETURN_NAMES = ("images", "masks", "bbox",) + FUNCTION = "crop" + CATEGORY = "KJNodes/image" + + def crop_by_mask(self, mask, padding=0, min_crop_resolution=None, max_crop_resolution=None): + iy, ix = (mask == 1).nonzero(as_tuple=True) + h0, w0 = mask.shape + + if iy.numel() == 0: + x_c = w0 / 2.0 + y_c = h0 / 2.0 + width = 0 + height = 0 + else: + x_min = ix.min().item() + x_max = ix.max().item() + y_min = iy.min().item() + y_max = iy.max().item() + + width = x_max - x_min + height = y_max - y_min + + if width > w0 or height > h0: + raise Exception("Masked area out of bounds") + + x_c = (x_min + x_max) / 2.0 + y_c = (y_min + y_max) / 2.0 + + if min_crop_resolution: + width = max(width, min_crop_resolution) + height = max(height, min_crop_resolution) + + if max_crop_resolution: + width = min(width, max_crop_resolution) + height = min(height, max_crop_resolution) + + if w0 <= width: + x0 = 0 + w = w0 + else: + x0 = max(0, x_c - width / 2 - padding) + w = width + 2 * padding + if x0 + w > w0: + x0 = w0 - w + + if h0 <= height: + y0 = 0 + h = h0 + else: + y0 = max(0, y_c - height / 2 - padding) + h = height + 2 * padding + if y0 + h > h0: + y0 = h0 - h + + return (int(x0), int(y0), int(w), int(h)) + + def crop(self, image, mask, base_resolution, padding=0, min_crop_resolution=128, max_crop_resolution=512): + mask = mask.round() + image_list = [] + mask_list = [] + bbox_list = [] + + # First, collect all 
bounding boxes + bbox_params = [] + aspect_ratios = [] + for i in range(image.shape[0]): + x0, y0, w, h = self.crop_by_mask(mask[i], padding, min_crop_resolution, max_crop_resolution) + bbox_params.append((x0, y0, w, h)) + aspect_ratios.append(w / h) + + # Find maximum width and height + max_w = max([w for x0, y0, w, h in bbox_params]) + max_h = max([h for x0, y0, w, h in bbox_params]) + max_aspect_ratio = max(aspect_ratios) + + # Ensure dimensions are divisible by 16 + max_w = (max_w + 15) // 16 * 16 + max_h = (max_h + 15) // 16 * 16 + # Calculate common target dimensions + if max_aspect_ratio > 1: + target_width = base_resolution + target_height = int(base_resolution / max_aspect_ratio) + else: + target_height = base_resolution + target_width = int(base_resolution * max_aspect_ratio) + + for i in range(image.shape[0]): + x0, y0, w, h = bbox_params[i] + + # Adjust cropping to use maximum width and height + x_center = x0 + w / 2 + y_center = y0 + h / 2 + + x0_new = int(max(0, x_center - max_w / 2)) + y0_new = int(max(0, y_center - max_h / 2)) + x1_new = int(min(x0_new + max_w, image.shape[2])) + y1_new = int(min(y0_new + max_h, image.shape[1])) + x0_new = x1_new - max_w + y0_new = y1_new - max_h + + cropped_image = image[i][y0_new:y1_new, x0_new:x1_new, :] + cropped_mask = mask[i][y0_new:y1_new, x0_new:x1_new] + + # Ensure dimensions are divisible by 16 + target_width = (target_width + 15) // 16 * 16 + target_height = (target_height + 15) // 16 * 16 + + cropped_image = cropped_image.unsqueeze(0).movedim(-1, 1) # Move C to the second position (B, C, H, W) + cropped_image = common_upscale(cropped_image, target_width, target_height, "lanczos", "disabled") + cropped_image = cropped_image.movedim(1, -1).squeeze(0) + + cropped_mask = cropped_mask.unsqueeze(0).unsqueeze(0) + cropped_mask = common_upscale(cropped_mask, target_width, target_height, 'bilinear', "disabled") + cropped_mask = cropped_mask.squeeze(0).squeeze(0) + + image_list.append(cropped_image) + mask_list.append(cropped_mask) + bbox_list.append((x0_new, y0_new, x1_new, y1_new)) + + + return (torch.stack(image_list), torch.stack(mask_list), bbox_list) + +class ImageCropByMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE", ), + "mask": ("MASK", ), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("image", ) + FUNCTION = "crop" + CATEGORY = "KJNodes/image" + DESCRIPTION = "Crops the input images based on the provided mask." 
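+ # crop (below): for each image in the batch, the nonzero rows/columns of the rounded mask define a bounding box (the mask index is clamped to the mask batch size), the image is cropped to that box, and the per-image crops are concatenated back into a batch.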
+ + def crop(self, image, mask): + B, H, W, C = image.shape + mask = mask.round() + + # Find bounding box for each batch + crops = [] + + for b in range(B): + # Get coordinates of non-zero elements + rows = torch.any(mask[min(b, mask.shape[0]-1)] > 0, dim=1) + cols = torch.any(mask[min(b, mask.shape[0]-1)] > 0, dim=0) + + # Find boundaries + y_min, y_max = torch.where(rows)[0][[0, -1]] + x_min, x_max = torch.where(cols)[0][[0, -1]] + + # Crop image and mask + crop = image[b:b+1, y_min:y_max+1, x_min:x_max+1, :] + crops.append(crop) + + # Stack results back together + cropped_images = torch.cat(crops, dim=0) + + return (cropped_images, ) + + + +class ImageUncropByMask: + + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "destination": ("IMAGE",), + "source": ("IMAGE",), + "mask": ("MASK",), + "bbox": ("BBOX",), + }, + } + + CATEGORY = "KJNodes/image" + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "uncrop" + + def uncrop(self, destination, source, mask, bbox=None): + + output_list = [] + + B, H, W, C = destination.shape + + for i in range(source.shape[0]): + x0, y0, x1, y1 = bbox[i] + bbox_height = y1 - y0 + bbox_width = x1 - x0 + + # Resize source image to match the bounding box dimensions + #resized_source = F.interpolate(source[i].unsqueeze(0).movedim(-1, 1), size=(bbox_height, bbox_width), mode='bilinear', align_corners=False) + resized_source = common_upscale(source[i].unsqueeze(0).movedim(-1, 1), bbox_width, bbox_height, "lanczos", "disabled") + resized_source = resized_source.movedim(1, -1).squeeze(0) + + # Resize mask to match the bounding box dimensions + resized_mask = common_upscale(mask[i].unsqueeze(0).unsqueeze(0), bbox_width, bbox_height, "bilinear", "disabled") + resized_mask = resized_mask.squeeze(0).squeeze(0) + + # Calculate padding values + pad_left = x0 + pad_right = W - x1 + pad_top = y0 + pad_bottom = H - y1 + + # Pad the resized source image and mask to fit the destination dimensions + padded_source = F.pad(resized_source, pad=(0, 0, pad_left, pad_right, pad_top, pad_bottom), mode='constant', value=0) + padded_mask = F.pad(resized_mask, pad=(pad_left, pad_right, pad_top, pad_bottom), mode='constant', value=0) + + # Ensure the padded mask has the correct shape + padded_mask = padded_mask.unsqueeze(2).expand(-1, -1, destination[i].shape[2]) + # Ensure the padded source has the correct shape + padded_source = padded_source.unsqueeze(2).expand(-1, -1, -1, destination[i].shape[2]).squeeze(2) + + # Combine the destination and padded source images using the mask + result = destination[i] * (1.0 - padded_mask) + padded_source * padded_mask + + output_list.append(result) + + + return (torch.stack(output_list),) + +class ImageCropByMaskBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "masks": ("MASK", ), + "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "padding": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 1, }), + "preserve_size": ("BOOLEAN", {"default": False}), + "bg_color": ("STRING", {"default": "0, 0, 0", "tooltip": "Color as RGB values in range 0-255, separated by commas."}), + } + } + + RETURN_TYPES = ("IMAGE", "MASK", ) + RETURN_NAMES = ("images", "masks",) + FUNCTION = "crop" + CATEGORY = "KJNodes/image" + DESCRIPTION = "Crops the input images based on the provided masks." 
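+ # crop (below): for every mask, the nonzero region (expanded by padding) is cropped from the first image, scaled to fit width x height when it does not fit or preserve_size is off, centered on a width x height canvas, and composited over bg_color using the mask.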
+ + def crop(self, image, masks, width, height, bg_color, padding, preserve_size): + B, H, W, C = image.shape + BM, HM, WM = masks.shape + mask_count = BM + if HM != H or WM != W: + masks = F.interpolate(masks.unsqueeze(1), size=(H, W), mode='nearest-exact').squeeze(1) + print(masks.shape) + output_images = [] + output_masks = [] + + bg_color = [int(x.strip())/255.0 for x in bg_color.split(",")] + + # For each mask + for i in range(mask_count): + curr_mask = masks[i] + + # Find bounds + y_indices, x_indices = torch.nonzero(curr_mask, as_tuple=True) + if len(y_indices) == 0 or len(x_indices) == 0: + continue + + # Get exact bounds with padding + min_y = max(0, y_indices.min().item() - padding) + max_y = min(H, y_indices.max().item() + 1 + padding) + min_x = max(0, x_indices.min().item() - padding) + max_x = min(W, x_indices.max().item() + 1 + padding) + + # Ensure mask has correct shape for multiplication + curr_mask = curr_mask.unsqueeze(-1).expand(-1, -1, C) + + # Crop image and mask together + cropped_img = image[0, min_y:max_y, min_x:max_x, :] + cropped_mask = curr_mask[min_y:max_y, min_x:max_x, :] + + crop_h, crop_w = cropped_img.shape[0:2] + new_w = crop_w + new_h = crop_h + + if not preserve_size or crop_w > width or crop_h > height: + scale = min(width/crop_w, height/crop_h) + new_w = int(crop_w * scale) + new_h = int(crop_h * scale) + + # Resize RGB + resized_img = common_upscale(cropped_img.permute(2,0,1).unsqueeze(0), new_w, new_h, "lanczos", "disabled").squeeze(0).permute(1,2,0) + resized_mask = torch.nn.functional.interpolate( + cropped_mask.permute(2,0,1).unsqueeze(0), + size=(new_h, new_w), + mode='nearest' + ).squeeze(0).permute(1,2,0) + else: + resized_img = cropped_img + resized_mask = cropped_mask + + # Create empty tensors + new_img = torch.zeros((height, width, 3), dtype=image.dtype) + new_mask = torch.zeros((height, width), dtype=image.dtype) + + # Pad both + pad_x = (width - new_w) // 2 + pad_y = (height - new_h) // 2 + new_img[pad_y:pad_y+new_h, pad_x:pad_x+new_w, :] = resized_img + if len(resized_mask.shape) == 3: + resized_mask = resized_mask[:,:,0] # Take first channel if 3D + new_mask[pad_y:pad_y+new_h, pad_x:pad_x+new_w] = resized_mask + + output_images.append(new_img) + output_masks.append(new_mask) + + if not output_images: + return (torch.zeros((0, height, width, 3), dtype=image.dtype),) + + out_rgb = torch.stack(output_images, dim=0) + out_masks = torch.stack(output_masks, dim=0) + + # Apply mask to RGB + mask_expanded = out_masks.unsqueeze(-1).expand(-1, -1, -1, 3) + background_color = torch.tensor(bg_color, dtype=torch.float32, device=image.device) + out_rgb = out_rgb * mask_expanded + background_color * (1 - mask_expanded) + + return (out_rgb, out_masks) + +class ImagePadKJ: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE", ), + "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "extra_padding": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "pad_mode": (["edge", "color"],), + "color": ("STRING", {"default": "0, 0, 0", "tooltip": "Color as RGB values in range 0-255, separated by commas."}), + } + , "optional": { + "masks": ("MASK", ), + } + } + + RETURN_TYPES = ("IMAGE", "MASK", ) + RETURN_NAMES = ("images", "masks",) + FUNCTION 
= "pad" + CATEGORY = "KJNodes/image" + DESCRIPTION = "Pad the input image and optionally mask with the specified padding." + + def pad(self, image, left, right, top, bottom, extra_padding, color, pad_mode, mask=None): + B, H, W, C = image.shape + + # Resize masks to image dimensions if necessary + if mask is not None: + BM, HM, WM = mask.shape + if HM != H or WM != W: + mask = F.interpolate(mask.unsqueeze(1), size=(H, W), mode='nearest-exact').squeeze(1) + + # Parse background color + bg_color = [int(x.strip())/255.0 for x in color.split(",")] + if len(bg_color) == 1: + bg_color = bg_color * 3 # Grayscale to RGB + bg_color = torch.tensor(bg_color, dtype=image.dtype, device=image.device) + + # Calculate padding sizes with extra padding + pad_left = left + extra_padding + pad_right = right + extra_padding + pad_top = top + extra_padding + pad_bottom = bottom + extra_padding + + padded_width = W + pad_left + pad_right + padded_height = H + pad_top + pad_bottom + out_image = torch.zeros((B, padded_height, padded_width, C), dtype=image.dtype, device=image.device) + + # Fill padded areas + for b in range(B): + if pad_mode == "edge": + # Pad with edge color + # Define edge pixels + top_edge = image[b, 0, :, :] + bottom_edge = image[b, H-1, :, :] + left_edge = image[b, :, 0, :] + right_edge = image[b, :, W-1, :] + + # Fill borders with edge colors + out_image[b, :pad_top, :, :] = top_edge.mean(dim=0) + out_image[b, pad_top+H:, :, :] = bottom_edge.mean(dim=0) + out_image[b, :, :pad_left, :] = left_edge.mean(dim=0) + out_image[b, :, pad_left+W:, :] = right_edge.mean(dim=0) + out_image[b, pad_top:pad_top+H, pad_left:pad_left+W, :] = image[b] + else: + # Pad with specified background color + out_image[b, :, :, :] = bg_color.unsqueeze(0).unsqueeze(0) # Expand for H and W dimensions + out_image[b, pad_top:pad_top+H, pad_left:pad_left+W, :] = image[b] + + if mask is not None: + out_masks = torch.zeros((BM, padded_height, padded_width), dtype=mask.dtype, device=mask.device) + for m in range(BM): + out_masks[m, pad_top:pad_top+H, pad_left:pad_left+W] = mask[m] + else: + out_masks = torch.zeros((1, padded_height, padded_width), dtype=image.dtype, device=image.device) + + return (out_image, out_masks) diff --git a/custom_nodes/ComfyUI-KJNodes-main/nodes/intrinsic_lora_nodes.py b/custom_nodes/ComfyUI-KJNodes-main/nodes/intrinsic_lora_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..c8f125363836cc7721b4b61d100702594522d389 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/nodes/intrinsic_lora_nodes.py @@ -0,0 +1,115 @@ +import folder_paths +import os +import torch +import torch.nn.functional as F +from comfy.utils import ProgressBar, load_torch_file +import comfy.sample +from nodes import CLIPTextEncode + +script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +folder_paths.add_model_folder_path("intrinsic_loras", os.path.join(script_directory, "intrinsic_loras")) + +class Intrinsic_lora_sampling: + def __init__(self): + self.loaded_lora = None + + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "lora_name": (folder_paths.get_filename_list("intrinsic_loras"), ), + "task": ( + [ + 'depth map', + 'surface normals', + 'albedo', + 'shading', + ], + { + "default": 'depth map' + }), + "text": ("STRING", {"multiline": True, "default": ""}), + "clip": ("CLIP", ), + "vae": ("VAE", ), + "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}), + }, + "optional": { + "image": ("IMAGE",), + "optional_latent": 
("LATENT",), + }, + } + + RETURN_TYPES = ("IMAGE", "LATENT",) + FUNCTION = "onestepsample" + CATEGORY = "KJNodes" + DESCRIPTION = """ +Sampler to use the intrinsic loras: +https://github.com/duxiaodan/intrinsic-lora +These LoRAs are tiny and thus included +with this node pack. +""" + + def onestepsample(self, model, lora_name, clip, vae, text, task, per_batch, image=None, optional_latent=None): + pbar = ProgressBar(3) + + if optional_latent is None: + image_list = [] + for start_idx in range(0, image.shape[0], per_batch): + sub_pixels = vae.vae_encode_crop_pixels(image[start_idx:start_idx+per_batch]) + image_list.append(vae.encode(sub_pixels[:,:,:,:3])) + sample = torch.cat(image_list, dim=0) + else: + sample = optional_latent["samples"] + noise = torch.zeros(sample.size(), dtype=sample.dtype, layout=sample.layout, device="cpu") + prompt = task + "," + text + positive, = CLIPTextEncode.encode(self, clip, prompt) + negative = positive #negative shouldn't do anything in this scenario + + pbar.update(1) + + #custom model sampling to pass latent through as it is + class X0_PassThrough(comfy.model_sampling.EPS): + def calculate_denoised(self, sigma, model_output, model_input): + return model_output + def calculate_input(self, sigma, noise): + return noise + sampling_base = comfy.model_sampling.ModelSamplingDiscrete + sampling_type = X0_PassThrough + + class ModelSamplingAdvanced(sampling_base, sampling_type): + pass + model_sampling = ModelSamplingAdvanced(model.model.model_config) + + #load lora + model_clone = model.clone() + lora_path = folder_paths.get_full_path("intrinsic_loras", lora_name) + lora = load_torch_file(lora_path, safe_load=True) + self.loaded_lora = (lora_path, lora) + + model_clone_with_lora = comfy.sd.load_lora_for_models(model_clone, None, lora, 1.0, 0)[0] + + model_clone_with_lora.add_object_patch("model_sampling", model_sampling) + + samples = {"samples": comfy.sample.sample(model_clone_with_lora, noise, 1, 1.0, "euler", "simple", positive, negative, sample, + denoise=1.0, disable_noise=True, start_step=0, last_step=1, + force_full_denoise=True, noise_mask=None, callback=None, disable_pbar=True, seed=None)} + pbar.update(1) + + decoded = [] + for start_idx in range(0, samples["samples"].shape[0], per_batch): + decoded.append(vae.decode(samples["samples"][start_idx:start_idx+per_batch])) + image_out = torch.cat(decoded, dim=0) + + pbar.update(1) + + if task == 'depth map': + imax = image_out.max() + imin = image_out.min() + image_out = (image_out-imin)/(imax-imin) + image_out = torch.max(image_out, dim=3, keepdim=True)[0].repeat(1, 1, 1, 3) + elif task == 'surface normals': + image_out = F.normalize(image_out * 2 - 1, dim=3) / 2 + 0.5 + image_out = 1.0 - image_out + else: + image_out = image_out.clamp(-1.,1.) 
+ + return (image_out, samples,) \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/nodes/mask_nodes.py b/custom_nodes/ComfyUI-KJNodes-main/nodes/mask_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..8852d0662d0cd5ca2c4be6add22fe77e65ee7442 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/nodes/mask_nodes.py @@ -0,0 +1,1397 @@ +import torch +import torch.nn.functional as F +from torchvision.transforms import functional as TF +from PIL import Image, ImageDraw, ImageFilter, ImageFont +import scipy.ndimage +import numpy as np +from contextlib import nullcontext +import os + +import model_management +from comfy.utils import ProgressBar +from comfy.utils import common_upscale +from nodes import MAX_RESOLUTION + +import folder_paths + +from ..utility.utility import tensor2pil, pil2tensor + +script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +class BatchCLIPSeg: + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + + return {"required": + { + "images": ("IMAGE",), + "text": ("STRING", {"multiline": False}), + "threshold": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 10.0, "step": 0.001}), + "binary_mask": ("BOOLEAN", {"default": True}), + "combine_mask": ("BOOLEAN", {"default": False}), + "use_cuda": ("BOOLEAN", {"default": True}), + }, + "optional": + { + "blur_sigma": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}), + "opt_model": ("CLIPSEGMODEL", ), + "prev_mask": ("MASK", {"default": None}), + "image_bg_level": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "invert": ("BOOLEAN", {"default": False}), + } + } + + CATEGORY = "KJNodes/masking" + RETURN_TYPES = ("MASK", "IMAGE", ) + RETURN_NAMES = ("Mask", "Image", ) + FUNCTION = "segment_image" + DESCRIPTION = """ +Segments an image or batch of images using CLIPSeg. +""" + + def segment_image(self, images, text, threshold, binary_mask, combine_mask, use_cuda, blur_sigma=0.0, opt_model=None, prev_mask=None, invert= False, image_bg_level=0.5): + from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation + import torchvision.transforms as transforms + offload_device = model_management.unet_offload_device() + device = model_management.get_torch_device() + if not use_cuda: + device = torch.device("cpu") + dtype = model_management.unet_dtype() + + if opt_model is None: + checkpoint_path = os.path.join(folder_paths.models_dir,'clip_seg', 'clipseg-rd64-refined-fp16') + if not hasattr(self, "model"): + try: + if not os.path.exists(checkpoint_path): + from huggingface_hub import snapshot_download + snapshot_download(repo_id="Kijai/clipseg-rd64-refined-fp16", local_dir=checkpoint_path, local_dir_use_symlinks=False) + self.model = CLIPSegForImageSegmentation.from_pretrained(checkpoint_path) + except: + checkpoint_path = "CIDAS/clipseg-rd64-refined" + self.model = CLIPSegForImageSegmentation.from_pretrained(checkpoint_path) + processor = CLIPSegProcessor.from_pretrained(checkpoint_path) + + else: + self.model = opt_model['model'] + processor = opt_model['processor'] + + self.model.to(dtype).to(device) + + B, H, W, C = images.shape + images = images.to(device) + + autocast_condition = (dtype != torch.float32) and not model_management.is_device_mps(device) + with torch.autocast(model_management.get_autocast_device(device), dtype=dtype) if autocast_condition else nullcontext(): + + PIL_images = [Image.fromarray(np.clip(255. 
* image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) for image in images ] + prompt = [text] * len(images) + input_prc = processor(text=prompt, images=PIL_images, return_tensors="pt") + + for key in input_prc: + input_prc[key] = input_prc[key].to(device) + outputs = self.model(**input_prc) + + mask_tensor = torch.sigmoid(outputs.logits) + mask_tensor = (mask_tensor - mask_tensor.min()) / (mask_tensor.max() - mask_tensor.min()) + mask_tensor = torch.where(mask_tensor > (threshold), mask_tensor, torch.tensor(0, dtype=torch.float)) + print(mask_tensor.shape) + if len(mask_tensor.shape) == 2: + mask_tensor = mask_tensor.unsqueeze(0) + mask_tensor = F.interpolate(mask_tensor.unsqueeze(1), size=(H, W), mode='nearest') + mask_tensor = mask_tensor.squeeze(1) + + self.model.to(offload_device) + + if binary_mask: + mask_tensor = (mask_tensor > 0).float() + if blur_sigma > 0: + kernel_size = int(6 * int(blur_sigma) + 1) + blur = transforms.GaussianBlur(kernel_size=(kernel_size, kernel_size), sigma=(blur_sigma, blur_sigma)) + mask_tensor = blur(mask_tensor) + + if combine_mask: + mask_tensor = torch.max(mask_tensor, dim=0)[0] + mask_tensor = mask_tensor.unsqueeze(0).repeat(len(images),1,1) + + del outputs + model_management.soft_empty_cache() + + if prev_mask is not None: + if prev_mask.shape != mask_tensor.shape: + prev_mask = F.interpolate(prev_mask.unsqueeze(1), size=(H, W), mode='nearest') + mask_tensor = mask_tensor + prev_mask.to(device) + torch.clamp(mask_tensor, min=0.0, max=1.0) + + if invert: + mask_tensor = 1 - mask_tensor + + image_tensor = images * mask_tensor.unsqueeze(-1) + (1 - mask_tensor.unsqueeze(-1)) * image_bg_level + image_tensor = torch.clamp(image_tensor, min=0.0, max=1.0).cpu().float() + + mask_tensor = mask_tensor.cpu().float() + + return mask_tensor, image_tensor, + +class DownloadAndLoadCLIPSeg: + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + + return {"required": + { + "model": ( + [ 'Kijai/clipseg-rd64-refined-fp16', + 'CIDAS/clipseg-rd64-refined', + ], + ), + }, + } + + CATEGORY = "KJNodes/masking" + RETURN_TYPES = ("CLIPSEGMODEL",) + RETURN_NAMES = ("clipseg_model",) + FUNCTION = "segment_image" + DESCRIPTION = """ +Downloads and loads CLIPSeg model with huggingface_hub, +to ComfyUI/models/clip_seg +""" + + def segment_image(self, model): + from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation + checkpoint_path = os.path.join(folder_paths.models_dir,'clip_seg', os.path.basename(model)) + if not hasattr(self, "model"): + if not os.path.exists(checkpoint_path): + from huggingface_hub import snapshot_download + snapshot_download(repo_id=model, local_dir=checkpoint_path, local_dir_use_symlinks=False) + self.model = CLIPSegForImageSegmentation.from_pretrained(checkpoint_path) + + processor = CLIPSegProcessor.from_pretrained(checkpoint_path) + + clipseg_model = {} + clipseg_model['model'] = self.model + clipseg_model['processor'] = processor + + return clipseg_model, + +class CreateTextMask: + + RETURN_TYPES = ("IMAGE", "MASK",) + FUNCTION = "createtextmask" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Creates a text image and mask. +Looks for fonts from this folder: +ComfyUI/custom_nodes/ComfyUI-KJNodes/fonts + +If start_rotation and/or end_rotation are different values, +creates animation between them. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), + "text_x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "text_y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "font_size": ("INT", {"default": 32,"min": 8, "max": 4096, "step": 1}), + "font_color": ("STRING", {"default": "white"}), + "text": ("STRING", {"default": "HELLO!", "multiline": True}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "start_rotation": ("INT", {"default": 0,"min": 0, "max": 359, "step": 1}), + "end_rotation": ("INT", {"default": 0,"min": -359, "max": 359, "step": 1}), + }, + } + + def createtextmask(self, frames, width, height, invert, text_x, text_y, text, font_size, font_color, font, start_rotation, end_rotation): + # Define the number of images in the batch + batch_size = frames + out = [] + masks = [] + rotation = start_rotation + if start_rotation != end_rotation: + rotation_increment = (end_rotation - start_rotation) / (batch_size - 1) + + font_path = folder_paths.get_full_path("kjnodes_fonts", font) + # Generate the text + for i in range(batch_size): + image = Image.new("RGB", (width, height), "black") + draw = ImageDraw.Draw(image) + font = ImageFont.truetype(font_path, font_size) + + # Split the text into words + words = text.split() + + # Initialize variables for line creation + lines = [] + current_line = [] + current_line_width = 0 + try: #new pillow + # Iterate through words to create lines + for word in words: + word_width = font.getbbox(word)[2] + if current_line_width + word_width <= width - 2 * text_x: + current_line.append(word) + current_line_width += word_width + font.getbbox(" ")[2] # Add space width + else: + lines.append(" ".join(current_line)) + current_line = [word] + current_line_width = word_width + except: #old pillow + for word in words: + word_width = font.getsize(word)[0] + if current_line_width + word_width <= width - 2 * text_x: + current_line.append(word) + current_line_width += word_width + font.getsize(" ")[0] # Add space width + else: + lines.append(" ".join(current_line)) + current_line = [word] + current_line_width = word_width + + # Add the last line if it's not empty + if current_line: + lines.append(" ".join(current_line)) + + # Draw each line of text separately + y_offset = text_y + for line in lines: + text_width = font.getlength(line) + text_height = font_size + text_center_x = text_x + text_width / 2 + text_center_y = y_offset + text_height / 2 + try: + draw.text((text_x, y_offset), line, font=font, fill=font_color, features=['-liga']) + except: + draw.text((text_x, y_offset), line, font=font, fill=font_color) + y_offset += text_height # Move to the next line + + if start_rotation != end_rotation: + image = image.rotate(rotation, center=(text_center_x, text_center_y)) + rotation += rotation_increment + + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + mask = image[:, :, :, 0] + masks.append(mask) + out.append(image) + + if invert: + return (1.0 - torch.cat(out, dim=0), 1.0 - torch.cat(masks, dim=0),) + return (torch.cat(out, dim=0),torch.cat(masks, dim=0),) + +class ColorToMask: + + RETURN_TYPES = ("MASK",) + FUNCTION = "clip" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Converts chosen RGB value 
to a mask. +With batch inputs, the **per_batch** +controls the number of images processed at once. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "invert": ("BOOLEAN", {"default": False}), + "red": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "green": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "blue": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "threshold": ("INT", {"default": 10,"min": 0, "max": 255, "step": 1}), + "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}), + }, + } + + def clip(self, images, red, green, blue, threshold, invert, per_batch): + + color = torch.tensor([red, green, blue], dtype=torch.uint8) + black = torch.tensor([0, 0, 0], dtype=torch.uint8) + white = torch.tensor([255, 255, 255], dtype=torch.uint8) + + if invert: + black, white = white, black + + steps = images.shape[0] + pbar = ProgressBar(steps) + tensors_out = [] + + for start_idx in range(0, images.shape[0], per_batch): + + # Calculate color distances + color_distances = torch.norm(images[start_idx:start_idx+per_batch] * 255 - color, dim=-1) + + # Create a mask based on the threshold + mask = color_distances <= threshold + + # Apply the mask to create new images + mask_out = torch.where(mask.unsqueeze(-1), white, black).float() + mask_out = mask_out.mean(dim=-1) + + tensors_out.append(mask_out.cpu()) + batch_count = mask_out.shape[0] + pbar.update(batch_count) + + tensors_out = torch.cat(tensors_out, dim=0) + tensors_out = torch.clamp(tensors_out, min=0.0, max=1.0) + return tensors_out, + +class CreateFluidMask: + + RETURN_TYPES = ("IMAGE", "MASK") + FUNCTION = "createfluidmask" + CATEGORY = "KJNodes/masking/generate" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), + "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "inflow_count": ("INT", {"default": 3,"min": 0, "max": 255, "step": 1}), + "inflow_velocity": ("INT", {"default": 1,"min": 0, "max": 255, "step": 1}), + "inflow_radius": ("INT", {"default": 8,"min": 0, "max": 255, "step": 1}), + "inflow_padding": ("INT", {"default": 50,"min": 0, "max": 255, "step": 1}), + "inflow_duration": ("INT", {"default": 60,"min": 0, "max": 255, "step": 1}), + }, + } + #using code from https://github.com/GregTJ/stable-fluids + def createfluidmask(self, frames, width, height, invert, inflow_count, inflow_velocity, inflow_radius, inflow_padding, inflow_duration): + from ..utility.fluid import Fluid + try: + from scipy.special import erf + except: + from scipy.spatial import erf + out = [] + masks = [] + RESOLUTION = width, height + DURATION = frames + + INFLOW_PADDING = inflow_padding + INFLOW_DURATION = inflow_duration + INFLOW_RADIUS = inflow_radius + INFLOW_VELOCITY = inflow_velocity + INFLOW_COUNT = inflow_count + + print('Generating fluid solver, this may take some time.') + fluid = Fluid(RESOLUTION, 'dye') + + center = np.floor_divide(RESOLUTION, 2) + r = np.min(center) - INFLOW_PADDING + + points = np.linspace(-np.pi, np.pi, INFLOW_COUNT, endpoint=False) + points = tuple(np.array((np.cos(p), np.sin(p))) for p in points) + normals = tuple(-p for p in points) + points = tuple(r * p + center for p in points) + + inflow_velocity = np.zeros_like(fluid.velocity) + inflow_dye = np.zeros(fluid.shape) + for p, n in zip(points, normals): + mask 
= np.linalg.norm(fluid.indices - p[:, None, None], axis=0) <= INFLOW_RADIUS + inflow_velocity[:, mask] += n[:, None] * INFLOW_VELOCITY + inflow_dye[mask] = 1 + + + for f in range(DURATION): + print(f'Computing frame {f + 1} of {DURATION}.') + if f <= INFLOW_DURATION: + fluid.velocity += inflow_velocity + fluid.dye += inflow_dye + + curl = fluid.step()[1] + # Using the error function to make the contrast a bit higher. + # Any other sigmoid function e.g. smoothstep would work. + curl = (erf(curl * 2) + 1) / 4 + + color = np.dstack((curl, np.ones(fluid.shape), fluid.dye)) + color = (np.clip(color, 0, 1) * 255).astype('uint8') + image = np.array(color).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + mask = image[:, :, :, 0] + masks.append(mask) + out.append(image) + + if invert: + return (1.0 - torch.cat(out, dim=0),1.0 - torch.cat(masks, dim=0),) + return (torch.cat(out, dim=0),torch.cat(masks, dim=0),) + +class CreateAudioMask: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "createaudiomask" + CATEGORY = "KJNodes/deprecated" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 16,"min": 1, "max": 255, "step": 1}), + "scale": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 2.0, "step": 0.01}), + "audio_path": ("STRING", {"default": "audio.wav"}), + "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + }, + } + + def createaudiomask(self, frames, width, height, invert, audio_path, scale): + try: + import librosa + except ImportError: + raise Exception("Can not import librosa. Install it with 'pip install librosa'") + batch_size = frames + out = [] + masks = [] + if audio_path == "audio.wav": #I don't know why relative path won't work otherwise... 
+ audio_path = os.path.join(script_directory, audio_path) + audio, sr = librosa.load(audio_path) + spectrogram = np.abs(librosa.stft(audio)) + + for i in range(batch_size): + image = Image.new("RGB", (width, height), "black") + draw = ImageDraw.Draw(image) + frame = spectrogram[:, i] + circle_radius = int(height * np.mean(frame)) + circle_radius *= scale + circle_center = (width // 2, height // 2) # Calculate the center of the image + + draw.ellipse([(circle_center[0] - circle_radius, circle_center[1] - circle_radius), + (circle_center[0] + circle_radius, circle_center[1] + circle_radius)], + fill='white') + + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + mask = image[:, :, :, 0] + masks.append(mask) + out.append(image) + + if invert: + return (1.0 - torch.cat(out, dim=0),) + return (torch.cat(out, dim=0),torch.cat(masks, dim=0),) + +class CreateGradientMask: + + RETURN_TYPES = ("MASK",) + FUNCTION = "createmask" + CATEGORY = "KJNodes/masking/generate" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + }, + } + def createmask(self, frames, width, height, invert): + # Define the number of images in the batch + batch_size = frames + out = [] + # Create an empty array to store the image batch + image_batch = np.zeros((batch_size, height, width), dtype=np.float32) + # Generate the black to white gradient for each image + for i in range(batch_size): + gradient = np.linspace(1.0, 0.0, width, dtype=np.float32) + time = i / frames # Calculate the time variable + offset_gradient = gradient - time # Offset the gradient values based on time + image_batch[i] = offset_gradient.reshape(1, -1) + output = torch.from_numpy(image_batch) + mask = output + out.append(mask) + if invert: + return (1.0 - torch.cat(out, dim=0),) + return (torch.cat(out, dim=0),) + +class CreateFadeMask: + + RETURN_TYPES = ("MASK",) + FUNCTION = "createfademask" + CATEGORY = "KJNodes/deprecated" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 2,"min": 2, "max": 10000, "step": 1}), + "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), + "start_level": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 1.0, "step": 0.01}), + "midpoint_level": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 1.0, "step": 0.01}), + "end_level": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 1.0, "step": 0.01}), + "midpoint_frame": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + }, + } + + def createfademask(self, frames, width, height, invert, interpolation, start_level, midpoint_level, end_level, midpoint_frame): + def ease_in(t): + return t * t + + def ease_out(t): + return 1 - (1 - t) * (1 - t) + + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + + batch_size = frames + out = [] + image_batch = np.zeros((batch_size, height, width), dtype=np.float32) + + if midpoint_frame == 0: + midpoint_frame = batch_size // 2 + + for i in range(batch_size): + if i <= midpoint_frame: + t = i / midpoint_frame + if interpolation == "ease_in": + t = ease_in(t) + elif interpolation == 
"ease_out": + t = ease_out(t) + elif interpolation == "ease_in_out": + t = ease_in_out(t) + color = start_level - t * (start_level - midpoint_level) + else: + t = (i - midpoint_frame) / (batch_size - midpoint_frame) + if interpolation == "ease_in": + t = ease_in(t) + elif interpolation == "ease_out": + t = ease_out(t) + elif interpolation == "ease_in_out": + t = ease_in_out(t) + color = midpoint_level - t * (midpoint_level - end_level) + + color = np.clip(color, 0, 255) + image = np.full((height, width), color, dtype=np.float32) + image_batch[i] = image + + output = torch.from_numpy(image_batch) + mask = output + out.append(mask) + + if invert: + return (1.0 - torch.cat(out, dim=0),) + return (torch.cat(out, dim=0),) + +class CreateFadeMaskAdvanced: + + RETURN_TYPES = ("MASK",) + FUNCTION = "createfademask" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Create a batch of masks interpolated between given frames and values. +Uses same syntax as Fizz' BatchValueSchedule. +First value is the frame index (not that this starts from 0, not 1) +and the second value inside the brackets is the float value of the mask in range 0.0 - 1.0 + +For example the default values: +0:(0.0) +7:(1.0) +15:(0.0) + +Would create a mask batch fo 16 frames, starting from black, +interpolating with the chosen curve to fully white at the 8th frame, +and interpolating from that to fully black at the 16th frame. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "points_string": ("STRING", {"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n", "multiline": True}), + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 16,"min": 2, "max": 10000, "step": 1}), + "width": ("INT", {"default": 512,"min": 1, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 1, "max": 4096, "step": 1}), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), + }, + } + + def createfademask(self, frames, width, height, invert, points_string, interpolation): + def ease_in(t): + return t * t + + def ease_out(t): + return 1 - (1 - t) * (1 - t) + + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + + # Parse the input string into a list of tuples + points = [] + points_string = points_string.rstrip(',\n') + for point_str in points_string.split(','): + frame_str, color_str = point_str.split(':') + frame = int(frame_str.strip()) + color = float(color_str.strip()[1:-1]) # Remove parentheses around color + points.append((frame, color)) + + # Check if the last frame is already in the points + if len(points) == 0 or points[-1][0] != frames - 1: + # If not, add it with the color of the last specified frame + points.append((frames - 1, points[-1][1] if points else 0)) + + # Sort the points by frame number + points.sort(key=lambda x: x[0]) + + batch_size = frames + out = [] + image_batch = np.zeros((batch_size, height, width), dtype=np.float32) + + # Index of the next point to interpolate towards + next_point = 1 + + for i in range(batch_size): + while next_point < len(points) and i > points[next_point][0]: + next_point += 1 + + # Interpolate between the previous point and the next point + prev_point = next_point - 1 + t = (i - points[prev_point][0]) / (points[next_point][0] - points[prev_point][0]) + if interpolation == "ease_in": + t = ease_in(t) + elif interpolation == "ease_out": + t = ease_out(t) + elif interpolation == "ease_in_out": + t = ease_in_out(t) + elif interpolation == "linear": + pass # No need to modify `t` for linear interpolation + + color = 
points[prev_point][1] - t * (points[prev_point][1] - points[next_point][1]) + color = np.clip(color, 0, 255) + image = np.full((height, width), color, dtype=np.float32) + image_batch[i] = image + + output = torch.from_numpy(image_batch) + mask = output + out.append(mask) + + if invert: + return (1.0 - torch.cat(out, dim=0),) + return (torch.cat(out, dim=0),) + +class CreateMagicMask: + + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "createmagicmask" + CATEGORY = "KJNodes/masking/generate" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "frames": ("INT", {"default": 16,"min": 2, "max": 4096, "step": 1}), + "depth": ("INT", {"default": 12,"min": 1, "max": 500, "step": 1}), + "distortion": ("FLOAT", {"default": 1.5,"min": 0.0, "max": 100.0, "step": 0.01}), + "seed": ("INT", {"default": 123,"min": 0, "max": 99999999, "step": 1}), + "transitions": ("INT", {"default": 1,"min": 1, "max": 20, "step": 1}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + }, + } + + def createmagicmask(self, frames, transitions, depth, distortion, seed, frame_width, frame_height): + from ..utility.magictex import coordinate_grid, random_transform, magic + import matplotlib.pyplot as plt + rng = np.random.default_rng(seed) + out = [] + coords = coordinate_grid((frame_width, frame_height)) + + # Calculate the number of frames for each transition + frames_per_transition = frames // transitions + + # Generate a base set of parameters + base_params = { + "coords": random_transform(coords, rng), + "depth": depth, + "distortion": distortion, + } + for t in range(transitions): + # Generate a second set of parameters that is at most max_diff away from the base parameters + params1 = base_params.copy() + params2 = base_params.copy() + + params1['coords'] = random_transform(coords, rng) + params2['coords'] = random_transform(coords, rng) + + for i in range(frames_per_transition): + # Compute the interpolation factor + alpha = i / frames_per_transition + + # Interpolate between the two sets of parameters + params = params1.copy() + params['coords'] = (1 - alpha) * params1['coords'] + alpha * params2['coords'] + + tex = magic(**params) + + dpi = frame_width / 10 + fig = plt.figure(figsize=(10, 10), dpi=dpi) + + ax = fig.add_subplot(111) + plt.subplots_adjust(left=0, right=1, bottom=0, top=1) + + ax.get_yaxis().set_ticks([]) + ax.get_xaxis().set_ticks([]) + ax.imshow(tex, aspect='auto') + + fig.canvas.draw() + img = np.array(fig.canvas.renderer._renderer) + + plt.close(fig) + + pil_img = Image.fromarray(img).convert("L") + mask = torch.tensor(np.array(pil_img)) / 255.0 + + out.append(mask) + + return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),) + +class CreateShapeMask: + + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "createshapemask" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Creates a mask or batch of masks with the specified shape. +Locations are center locations. +Grow value is the amount to grow the shape on each frame, creating animated masks. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "shape": ( + [ 'circle', + 'square', + 'triangle', + ], + { + "default": 'circle' + }), + "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), + "location_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "location_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "grow": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "shape_width": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + "shape_height": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + }, + } + + def createshapemask(self, frames, frame_width, frame_height, location_x, location_y, shape_width, shape_height, grow, shape): + # Define the number of images in the batch + batch_size = frames + out = [] + color = "white" + for i in range(batch_size): + image = Image.new("RGB", (frame_width, frame_height), "black") + draw = ImageDraw.Draw(image) + + # Calculate the size for this frame and ensure it's not less than 0 + current_width = max(0, shape_width + i*grow) + current_height = max(0, shape_height + i*grow) + + if shape == 'circle' or shape == 'square': + # Define the bounding box for the shape + left_up_point = (location_x - current_width // 2, location_y - current_height // 2) + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) + two_points = [left_up_point, right_down_point] + + if shape == 'circle': + draw.ellipse(two_points, fill=color) + elif shape == 'square': + draw.rectangle(two_points, fill=color) + + elif shape == 'triangle': + # Define the points for the triangle + left_up_point = (location_x - current_width // 2, location_y + current_height // 2) # bottom left + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) # bottom right + top_point = (location_x, location_y - current_height // 2) # top point + draw.polygon([top_point, left_up_point, right_down_point], fill=color) + + image = pil2tensor(image) + mask = image[:, :, :, 0] + out.append(mask) + outstack = torch.cat(out, dim=0) + return (outstack, 1.0 - outstack,) + +class CreateVoronoiMask: + + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "createvoronoi" + CATEGORY = "KJNodes/masking/generate" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "frames": ("INT", {"default": 16,"min": 2, "max": 4096, "step": 1}), + "num_points": ("INT", {"default": 15,"min": 1, "max": 4096, "step": 1}), + "line_width": ("INT", {"default": 4,"min": 1, "max": 4096, "step": 1}), + "speed": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 1.0, "step": 0.01}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + }, + } + + def createvoronoi(self, frames, num_points, line_width, speed, frame_width, frame_height): + from scipy.spatial import Voronoi + # Define the number of images in the batch + batch_size = frames + out = [] + + # Calculate aspect ratio + aspect_ratio = frame_width / frame_height + + # Create start and end points for each point, considering the aspect ratio + start_points = np.random.rand(num_points, 2) + start_points[:, 0] *= aspect_ratio + + end_points = np.random.rand(num_points, 2) + end_points[:, 0] *= aspect_ratio + + for i in 
range(batch_size): + # Interpolate the points' positions based on the current frame + t = (i * speed) / (batch_size - 1) # normalize to [0, 1] over the frames + t = np.clip(t, 0, 1) # ensure t is in [0, 1] + points = (1 - t) * start_points + t * end_points # lerp + + # Adjust points for aspect ratio + points[:, 0] *= aspect_ratio + + vor = Voronoi(points) + + # Create a blank image with a white background + fig, ax = plt.subplots() + plt.subplots_adjust(left=0, right=1, bottom=0, top=1) + ax.set_xlim([0, aspect_ratio]); ax.set_ylim([0, 1]) # adjust x limits + ax.axis('off') + ax.margins(0, 0) + fig.set_size_inches(aspect_ratio * frame_height/100, frame_height/100) # adjust figure size + ax.fill_between([0, 1], [0, 1], color='white') + + # Plot each Voronoi ridge + for simplex in vor.ridge_vertices: + simplex = np.asarray(simplex) + if np.all(simplex >= 0): + plt.plot(vor.vertices[simplex, 0], vor.vertices[simplex, 1], 'k-', linewidth=line_width) + + fig.canvas.draw() + img = np.array(fig.canvas.renderer._renderer) + + plt.close(fig) + + pil_img = Image.fromarray(img).convert("L") + mask = torch.tensor(np.array(pil_img)) / 255.0 + + out.append(mask) + + return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),) + +class GetMaskSizeAndCount: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK",), + }} + + RETURN_TYPES = ("MASK","INT", "INT", "INT",) + RETURN_NAMES = ("mask", "width", "height", "count",) + FUNCTION = "getsize" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Returns the width, height and batch size of the mask, +and passes it through unchanged. + +""" + + def getsize(self, mask): + width = mask.shape[2] + height = mask.shape[1] + count = mask.shape[0] + return {"ui": { + "text": [f"{count}x{width}x{height}"]}, + "result": (mask, width, height, count) + } + +class GrowMaskWithBlur: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "mask": ("MASK",), + "expand": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1}), + "incremental_expandrate": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}), + "tapered_corners": ("BOOLEAN", {"default": True}), + "flip_input": ("BOOLEAN", {"default": False}), + "blur_radius": ("FLOAT", { + "default": 0.0, + "min": 0.0, + "max": 100, + "step": 0.1 + }), + "lerp_alpha": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "decay_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "fill_holes": ("BOOLEAN", {"default": False}), + }, + } + + CATEGORY = "KJNodes/masking" + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "expand_mask" + DESCRIPTION = """ +# GrowMaskWithBlur +- mask: Input mask or mask batch +- expand: Expand or contract mask or mask batch by a given amount +- incremental_expandrate: increase expand rate by a given amount per frame +- tapered_corners: use tapered corners +- flip_input: flip input mask +- blur_radius: value higher than 0 will blur the mask +- lerp_alpha: alpha value for interpolation between frames +- decay_factor: decay value for interpolation between frames +- fill_holes: fill holes in the mask (slow)""" + + def expand_mask(self, mask, expand, tapered_corners, flip_input, blur_radius, incremental_expandrate, lerp_alpha, decay_factor, fill_holes=False): + alpha = lerp_alpha + decay = decay_factor + if flip_input: + mask = 1.0 - mask + c = 0 if tapered_corners else 1 + kernel = np.array([[c, 1, c], + [1, 1, 1], + [c, 1, 
c]]) + growmask = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).cpu() + out = [] + previous_output = None + current_expand = expand + for m in growmask: + output = m.numpy().astype(np.float32) + for _ in range(abs(round(current_expand))): + if current_expand < 0: + output = scipy.ndimage.grey_erosion(output, footprint=kernel) + else: + output = scipy.ndimage.grey_dilation(output, footprint=kernel) + if current_expand < 0: + current_expand -= abs(incremental_expandrate) + else: + current_expand += abs(incremental_expandrate) + if fill_holes: + binary_mask = output > 0 + output = scipy.ndimage.binary_fill_holes(binary_mask) + output = output.astype(np.float32) * 255 + output = torch.from_numpy(output) + if alpha < 1.0 and previous_output is not None: + # Interpolate between the previous and current frame + output = alpha * output + (1 - alpha) * previous_output + if decay < 1.0 and previous_output is not None: + # Add the decayed previous output to the current frame + output += decay * previous_output + output = output / output.max() + previous_output = output + out.append(output) + + if blur_radius != 0: + # Convert the tensor list to PIL images, apply blur, and convert back + for idx, tensor in enumerate(out): + # Convert tensor to PIL image + pil_image = tensor2pil(tensor.cpu().detach())[0] + # Apply Gaussian blur + pil_image = pil_image.filter(ImageFilter.GaussianBlur(blur_radius)) + # Convert back to tensor + out[idx] = pil2tensor(pil_image) + blurred = torch.cat(out, dim=0) + return (blurred, 1.0 - blurred) + else: + return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),) + +class MaskBatchMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + }, + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("masks",) + FUNCTION = "combine" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Creates an image batch from multiple masks. +You can set how many inputs the node has, +with the **inputcount** and clicking update. +""" + + def combine(self, inputcount, **kwargs): + mask = kwargs["mask_1"] + for c in range(1, inputcount): + new_mask = kwargs[f"mask_{c + 1}"] + if mask.shape[1:] != new_mask.shape[1:]: + new_mask = F.interpolate(new_mask.unsqueeze(1), size=(mask.shape[1], mask.shape[2]), mode="bicubic").squeeze(1) + mask = torch.cat((mask, new_mask), dim=0) + return (mask,) + +class OffsetMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "x": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "y": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "angle": ("INT", { "default": 0, "min": -360, "max": 360, "step": 1, "display": "number" }), + "duplication_factor": ("INT", { "default": 1, "min": 1, "max": 1000, "step": 1, "display": "number" }), + "roll": ("BOOLEAN", { "default": False }), + "incremental": ("BOOLEAN", { "default": False }), + "padding_mode": ( + [ + 'empty', + 'border', + 'reflection', + + ], { + "default": 'empty' + }), + } + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("mask",) + FUNCTION = "offset" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Offsets the mask by the specified amount. 
+ - mask: Input mask or mask batch + - x: Horizontal offset + - y: Vertical offset + - angle: Angle in degrees + - roll: roll edge wrapping + - duplication_factor: Number of times to duplicate the mask to form a batch + - border padding_mode: Padding mode for the mask +""" + + def offset(self, mask, x, y, angle, roll=False, incremental=False, duplication_factor=1, padding_mode="empty"): + # Create duplicates of the mask batch + mask = mask.repeat(duplication_factor, 1, 1).clone() + + batch_size, height, width = mask.shape + + if angle != 0 and incremental: + for i in range(batch_size): + rotation_angle = angle * (i+1) + mask[i] = TF.rotate(mask[i].unsqueeze(0), rotation_angle).squeeze(0) + elif angle > 0: + for i in range(batch_size): + mask[i] = TF.rotate(mask[i].unsqueeze(0), angle).squeeze(0) + + if roll: + if incremental: + for i in range(batch_size): + shift_x = min(x*(i+1), width-1) + shift_y = min(y*(i+1), height-1) + if shift_x != 0: + mask[i] = torch.roll(mask[i], shifts=shift_x, dims=1) + if shift_y != 0: + mask[i] = torch.roll(mask[i], shifts=shift_y, dims=0) + else: + shift_x = min(x, width-1) + shift_y = min(y, height-1) + if shift_x != 0: + mask = torch.roll(mask, shifts=shift_x, dims=2) + if shift_y != 0: + mask = torch.roll(mask, shifts=shift_y, dims=1) + else: + + for i in range(batch_size): + if incremental: + temp_x = min(x * (i+1), width-1) + temp_y = min(y * (i+1), height-1) + else: + temp_x = min(x, width-1) + temp_y = min(y, height-1) + if temp_x > 0: + if padding_mode == 'empty': + mask[i] = torch.cat([torch.zeros((height, temp_x)), mask[i, :, :-temp_x]], dim=1) + elif padding_mode in ['replicate', 'reflect']: + mask[i] = F.pad(mask[i, :, :-temp_x], (0, temp_x), mode=padding_mode) + elif temp_x < 0: + if padding_mode == 'empty': + mask[i] = torch.cat([mask[i, :, :temp_x], torch.zeros((height, -temp_x))], dim=1) + elif padding_mode in ['replicate', 'reflect']: + mask[i] = F.pad(mask[i, :, -temp_x:], (temp_x, 0), mode=padding_mode) + + if temp_y > 0: + if padding_mode == 'empty': + mask[i] = torch.cat([torch.zeros((temp_y, width)), mask[i, :-temp_y, :]], dim=0) + elif padding_mode in ['replicate', 'reflect']: + mask[i] = F.pad(mask[i, :-temp_y, :], (0, temp_y), mode=padding_mode) + elif temp_y < 0: + if padding_mode == 'empty': + mask[i] = torch.cat([mask[i, :temp_y, :], torch.zeros((-temp_y, width))], dim=0) + elif padding_mode in ['replicate', 'reflect']: + mask[i] = F.pad(mask[i, -temp_y:, :], (temp_y, 0), mode=padding_mode) + + return mask, + +class RoundMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK",), + }} + + RETURN_TYPES = ("MASK",) + FUNCTION = "round" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Rounds the mask or batch of masks to a binary mask. 
+RoundMask example + +""" + + def round(self, mask): + mask = mask.round() + return (mask,) + +class ResizeMask: + upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"] + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "keep_proportions": ("BOOLEAN", { "default": False }), + "upscale_method": (s.upscale_methods,), + "crop": (["disabled","center"],), + } + } + + RETURN_TYPES = ("MASK", "INT", "INT",) + RETURN_NAMES = ("mask", "width", "height",) + FUNCTION = "resize" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Resizes the mask or batch of masks to the specified width and height. +""" + + def resize(self, mask, width, height, keep_proportions, upscale_method,crop): + if keep_proportions: + _, oh, ow = mask.shape + width = ow if width == 0 else width + height = oh if height == 0 else height + ratio = min(width / ow, height / oh) + width = round(ow*ratio) + height = round(oh*ratio) + outputs = mask.unsqueeze(1) + outputs = common_upscale(outputs, width, height, upscale_method, crop) + outputs = outputs.squeeze(1) + + return(outputs, outputs.shape[2], outputs.shape[1],) + +class RemapMaskRange: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "min": ("FLOAT", {"default": 0.0,"min": -10.0, "max": 1.0, "step": 0.01}), + "max": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 10.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("mask",) + FUNCTION = "remap" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Sets new min and max values for the mask. +""" + + def remap(self, mask, min, max): + + # Find the maximum value in the mask + mask_max = torch.max(mask) + + # If the maximum mask value is zero, avoid division by zero by setting it to 1 + mask_max = mask_max if mask_max > 0 else 1 + + # Scale the mask values to the new range defined by min and max + # The highest pixel value in the mask will be scaled to max + scaled_mask = (mask / mask_max) * (max - min) + min + + # Clamp the values to ensure they are within [0.0, 1.0] + scaled_mask = torch.clamp(scaled_mask, min=0.0, max=1.0) + + return (scaled_mask, ) + + +def get_mask_polygon(self, mask_np): + import cv2 + """Helper function to get polygon points from mask""" + # Find contours + contours, _ = cv2.findContours(mask_np, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + + if not contours: + return None + + # Get the largest contour + largest_contour = max(contours, key=cv2.contourArea) + + # Approximate polygon + epsilon = 0.02 * cv2.arcLength(largest_contour, True) + polygon = cv2.approxPolyDP(largest_contour, epsilon, True) + + return polygon.squeeze() + +import cv2 +class SeparateMasks: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "mask": ("MASK", ), + "size_threshold_width" : ("INT", {"default": 256, "min": 0.0, "max": 4096, "step": 1}), + "size_threshold_height" : ("INT", {"default": 256, "min": 0.0, "max": 4096, "step": 1}), + "mode": (["convex_polygons", "area"],), + "max_poly_points": ("INT", {"default": 8, "min": 3, "max": 32, "step": 1}), + + }, + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("mask",) + FUNCTION = "separate" + CATEGORY = "KJNodes/masking" + OUTPUT_NODE = True + DESCRIPTION = "Separates a mask into multiple masks based on the size of the connected components." 
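+ # The helpers below label connected components with scipy.ndimage.label and drop components smaller than the width/height thresholds.
+ # In "convex_polygons" mode each kept component becomes its convex hull, simplified towards max_poly_points vertices by binary-searching the cv2.approxPolyDP epsilon; in "area" mode the raw component mask is kept. Output masks are sorted left to right by centroid x.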
+ + def polygon_to_mask(self, polygon, shape): + mask = np.zeros((shape[0], shape[1]), dtype=np.uint8) # Fixed shape handling + + if len(polygon.shape) == 2: # Check if polygon points are valid + polygon = polygon.astype(np.int32) + cv2.fillPoly(mask, [polygon], 1) + return mask + + def get_mask_polygon(self, mask_np, max_points): + contours, _ = cv2.findContours(mask_np, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + if not contours: + return None + + largest_contour = max(contours, key=cv2.contourArea) + hull = cv2.convexHull(largest_contour) + + # Initialize with smaller epsilon for more points + perimeter = cv2.arcLength(hull, True) + epsilon = perimeter * 0.01 # Start smaller + + min_eps = perimeter * 0.001 # Much smaller minimum + max_eps = perimeter * 0.2 # Smaller maximum + + best_approx = None + best_diff = float('inf') + max_iterations = 20 + + #print(f"Target points: {max_points}, Perimeter: {perimeter}") + + for i in range(max_iterations): + curr_eps = (min_eps + max_eps) / 2 + approx = cv2.approxPolyDP(hull, curr_eps, True) + points_diff = len(approx) - max_points + + #print(f"Iteration {i}: points={len(approx)}, eps={curr_eps:.4f}") + + if abs(points_diff) < best_diff: + best_approx = approx + best_diff = abs(points_diff) + + if len(approx) > max_points: + min_eps = curr_eps * 1.1 # More gradual adjustment + elif len(approx) < max_points: + max_eps = curr_eps * 0.9 # More gradual adjustment + else: + return approx.squeeze() + + if abs(max_eps - min_eps) < perimeter * 0.0001: # Relative tolerance + break + + # If we didn't find exact match, return best approximation + return best_approx.squeeze() if best_approx is not None else hull.squeeze() + + def separate(self, mask: torch.Tensor, size_threshold_width: int, size_threshold_height: int, max_poly_points: int, mode: str): + from scipy.ndimage import label, center_of_mass + import numpy as np + + B, H, W = mask.shape + separated = [] + + mask = mask.round() + + for b in range(B): + mask_np = mask[b].cpu().numpy().astype(np.uint8) + structure = np.ones((3, 3), dtype=np.int8) + labeled, ncomponents = label(mask_np, structure=structure) + pbar = ProgressBar(ncomponents) + + for component in range(1, ncomponents + 1): + component_mask_np = (labeled == component).astype(np.uint8) + + rows = np.any(component_mask_np, axis=1) + cols = np.any(component_mask_np, axis=0) + y_min, y_max = np.where(rows)[0][[0, -1]] + x_min, x_max = np.where(cols)[0][[0, -1]] + + width = x_max - x_min + 1 + height = y_max - y_min + 1 + centroid_x = (x_min + x_max) / 2 # Calculate x centroid + print(f"Component {component}: width={width}, height={height}, x_pos={centroid_x}") + + if width >= size_threshold_width and height >= size_threshold_height: + if mode != "area": + polygon = self.get_mask_polygon(component_mask_np, max_poly_points) + if polygon is not None: + poly_mask = self.polygon_to_mask(polygon, (H, W)) + poly_mask = torch.tensor(poly_mask, device=mask.device) + separated.append((centroid_x, poly_mask)) + else: + area_mask = torch.tensor(component_mask_np, device=mask.device) + separated.append((centroid_x, area_mask)) + pbar.update(1) + + if len(separated) > 0: + # Sort by x position and extract only the masks + separated.sort(key=lambda x: x[0]) + separated = [x[1] for x in separated] + out_masks = torch.stack(separated, dim=0) + return out_masks, + else: + return torch.empty((1, 64, 64), device=mask.device), + \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/nodes/model_optimization_nodes.py 
b/custom_nodes/ComfyUI-KJNodes-main/nodes/model_optimization_nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..ac7c29632e2fd016e1f8f55932bdee53ba445780 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/nodes/model_optimization_nodes.py @@ -0,0 +1,1179 @@ +from comfy.ldm.modules import attention as comfy_attention +import logging +import comfy.model_patcher +import comfy.utils +import comfy.sd +import torch +import folder_paths +import comfy.model_management as mm +from comfy.cli_args import args + +orig_attention = comfy_attention.optimized_attention +original_patch_model = comfy.model_patcher.ModelPatcher.patch_model +original_load_lora_for_models = comfy.sd.load_lora_for_models + +class BaseLoaderKJ: + original_linear = None + cublas_patched = False + + def _patch_modules(self, patch_cublaslinear, sage_attention): + from comfy.ops import disable_weight_init, CastWeightBiasOp, cast_bias_weight + + if sage_attention != "disabled": + print("Patching comfy attention to use sageattn") + from sageattention import sageattn + def set_sage_func(sage_attention): + if sage_attention == "auto": + def func(q, k, v, is_causal=False, attn_mask=None, tensor_layout="NHD"): + return sageattn(q, k, v, is_causal=is_causal, attn_mask=attn_mask, tensor_layout=tensor_layout) + return func + elif sage_attention == "sageattn_qk_int8_pv_fp16_cuda": + from sageattention import sageattn_qk_int8_pv_fp16_cuda + def func(q, k, v, is_causal=False, attn_mask=None, tensor_layout="NHD"): + return sageattn_qk_int8_pv_fp16_cuda(q, k, v, is_causal=is_causal, attn_mask=attn_mask, pv_accum_dtype="fp32", tensor_layout=tensor_layout) + return func + elif sage_attention == "sageattn_qk_int8_pv_fp16_triton": + from sageattention import sageattn_qk_int8_pv_fp16_triton + def func(q, k, v, is_causal=False, attn_mask=None, tensor_layout="NHD"): + return sageattn_qk_int8_pv_fp16_triton(q, k, v, is_causal=is_causal, attn_mask=attn_mask, tensor_layout=tensor_layout) + return func + elif sage_attention == "sageattn_qk_int8_pv_fp8_cuda": + from sageattention import sageattn_qk_int8_pv_fp8_cuda + def func(q, k, v, is_causal=False, attn_mask=None, tensor_layout="NHD"): + return sageattn_qk_int8_pv_fp8_cuda(q, k, v, is_causal=is_causal, attn_mask=attn_mask, pv_accum_dtype="fp32+fp32", tensor_layout=tensor_layout) + return func + + sage_func = set_sage_func(sage_attention) + + @torch.compiler.disable() + def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False): + if skip_reshape: + b, _, _, dim_head = q.shape + tensor_layout="HND" + else: + b, _, dim_head = q.shape + dim_head //= heads + q, k, v = map( + lambda t: t.view(b, -1, heads, dim_head), + (q, k, v), + ) + tensor_layout="NHD" + if mask is not None: + # add a batch dimension if there isn't already one + if mask.ndim == 2: + mask = mask.unsqueeze(0) + # add a heads dimension if there isn't already one + if mask.ndim == 3: + mask = mask.unsqueeze(1) + out = sage_func(q, k, v, attn_mask=mask, is_causal=False, tensor_layout=tensor_layout) + if tensor_layout == "HND": + if not skip_output_reshape: + out = ( + out.transpose(1, 2).reshape(b, -1, heads * dim_head) + ) + else: + if skip_output_reshape: + out = out.transpose(1, 2) + else: + out = out.reshape(b, -1, heads * dim_head) + return out + + comfy_attention.optimized_attention = attention_sage + comfy.ldm.hunyuan_video.model.optimized_attention = attention_sage + comfy.ldm.flux.math.optimized_attention = attention_sage + 
comfy.ldm.genmo.joint_model.asymm_models_joint.optimized_attention = attention_sage + comfy.ldm.cosmos.blocks.optimized_attention = attention_sage + comfy.ldm.wan.model.optimized_attention = attention_sage + + else: + comfy_attention.optimized_attention = orig_attention + comfy.ldm.hunyuan_video.model.optimized_attention = orig_attention + comfy.ldm.flux.math.optimized_attention = orig_attention + comfy.ldm.genmo.joint_model.asymm_models_joint.optimized_attention = orig_attention + comfy.ldm.cosmos.blocks.optimized_attention = orig_attention + comfy.ldm.wan.model.optimized_attention = orig_attention + + if patch_cublaslinear: + if not BaseLoaderKJ.cublas_patched: + BaseLoaderKJ.original_linear = disable_weight_init.Linear + try: + from cublas_ops import CublasLinear + except ImportError: + raise Exception("Can't import 'torch-cublas-hgemm', install it from here https://github.com/aredden/torch-cublas-hgemm") + + class PatchedLinear(CublasLinear, CastWeightBiasOp): + def reset_parameters(self): + pass + + def forward_comfy_cast_weights(self, input): + weight, bias = cast_bias_weight(self, input) + return torch.nn.functional.linear(input, weight, bias) + + def forward(self, *args, **kwargs): + if self.comfy_cast_weights: + return self.forward_comfy_cast_weights(*args, **kwargs) + else: + return super().forward(*args, **kwargs) + + disable_weight_init.Linear = PatchedLinear + BaseLoaderKJ.cublas_patched = True + else: + if BaseLoaderKJ.cublas_patched: + disable_weight_init.Linear = BaseLoaderKJ.original_linear + BaseLoaderKJ.cublas_patched = False + +class PathchSageAttentionKJ(BaseLoaderKJ): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "sage_attention": (["disabled", "auto", "sageattn_qk_int8_pv_fp16_cuda", "sageattn_qk_int8_pv_fp16_triton", "sageattn_qk_int8_pv_fp8_cuda"], {"default": False, "tooltip": "Global patch comfy attention to use sageattn, once patched to revert back to normal you would need to run this node again with disabled option."}), + }} + + RETURN_TYPES = ("MODEL", ) + FUNCTION = "patch" + DESCRIPTION = "Experimental node for patching attention mode. This doesn't use the model patching system and thus can't be disabled without running the node again with 'disabled' option." + EXPERIMENTAL = True + CATEGORY = "KJNodes/experimental" + + def patch(self, model, sage_attention): + self._patch_modules(False, sage_attention) + return model, + +class CheckpointLoaderKJ(BaseLoaderKJ): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "ckpt_name": (folder_paths.get_filename_list("checkpoints"), {"tooltip": "The name of the checkpoint (model) to load."}), + "patch_cublaslinear": ("BOOLEAN", {"default": False, "tooltip": "Enable or disable the patching, won't take effect on already loaded models!"}), + "sage_attention": (["disabled", "auto", "sageattn_qk_int8_pv_fp16_cuda", "sageattn_qk_int8_pv_fp16_triton", "sageattn_qk_int8_pv_fp8_cuda"], {"default": False, "tooltip": "Patch comfy attention to use sageattn."}), + }} + + RETURN_TYPES = ("MODEL", "CLIP", "VAE") + FUNCTION = "patch" + OUTPUT_NODE = True + DESCRIPTION = "Experimental node for patching torch.nn.Linear with CublasLinear." 
+ EXPERIMENTAL = True + CATEGORY = "KJNodes/experimental" + + def patch(self, ckpt_name, patch_cublaslinear, sage_attention): + self._patch_modules(patch_cublaslinear, sage_attention) + from nodes import CheckpointLoaderSimple + model, clip, vae = CheckpointLoaderSimple.load_checkpoint(self, ckpt_name) + return model, clip, vae + +class DiffusionModelLoaderKJ(BaseLoaderKJ): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model_name": (folder_paths.get_filename_list("diffusion_models"), {"tooltip": "The name of the checkpoint (model) to load."}), + "weight_dtype": (["default", "fp8_e4m3fn", "fp8_e4m3fn_fast", "fp8_e5m2", "fp16", "bf16", "fp32"],), + "compute_dtype": (["default", "fp16", "bf16", "fp32"], {"default": "fp16", "tooltip": "The compute dtype to use for the model."}), + "patch_cublaslinear": ("BOOLEAN", {"default": False, "tooltip": "Enable or disable the patching, won't take effect on already loaded models!"}), + "sage_attention": (["disabled", "auto", "sageattn_qk_int8_pv_fp16_cuda", "sageattn_qk_int8_pv_fp16_triton", "sageattn_qk_int8_pv_fp8_cuda"], {"default": False, "tooltip": "Patch comfy attention to use sageattn."}), + "enable_fp16_accumulation": ("BOOLEAN", {"default": False, "tooltip": "Enable torch.backends.cuda.matmul.allow_fp16_accumulation, requires pytorch 2.7.0 nightly."}), + }} + + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch_and_load" + OUTPUT_NODE = True + DESCRIPTION = "Node for patching torch.nn.Linear with CublasLinear." + EXPERIMENTAL = True + CATEGORY = "KJNodes/experimental" + + def patch_and_load(self, model_name, weight_dtype, compute_dtype, patch_cublaslinear, sage_attention, enable_fp16_accumulation): + DTYPE_MAP = { + "fp8_e4m3fn": torch.float8_e4m3fn, + "fp8_e5m2": torch.float8_e5m2, + "fp16": torch.float16, + "bf16": torch.bfloat16, + "fp32": torch.float32 + } + model_options = {} + if dtype := DTYPE_MAP.get(weight_dtype): + model_options["dtype"] = dtype + print(f"Setting {model_name} weight dtype to {dtype}") + + if weight_dtype == "fp8_e4m3fn_fast": + model_options["dtype"] = torch.float8_e4m3fn + model_options["fp8_optimizations"] = True + + if enable_fp16_accumulation: + if hasattr(torch.backends.cuda.matmul, "allow_fp16_accumulation"): + torch.backends.cuda.matmul.allow_fp16_accumulation = True + else: + raise RuntimeError("Failed to set fp16 accumulation, this requires pytorch 2.7.0 nightly currently") + else: + if hasattr(torch.backends.cuda.matmul, "allow_fp16_accumulation"): + torch.backends.cuda.matmul.allow_fp16_accumulation = False + + unet_path = folder_paths.get_full_path_or_raise("diffusion_models", model_name) + model = comfy.sd.load_diffusion_model(unet_path, model_options=model_options) + if dtype := DTYPE_MAP.get(compute_dtype): + model.set_model_compute_dtype(dtype) + model.force_cast_weights = False + print(f"Setting {model_name} compute dtype to {dtype}") + self._patch_modules(patch_cublaslinear, sage_attention) + + return (model,) + +def patched_patch_model(self, device_to=None, lowvram_model_memory=0, load_weights=True, force_patch_weights=False): + with self.use_ejected(): + + device_to = mm.get_torch_device() + + full_load_override = getattr(self.model, "full_load_override", "auto") + if full_load_override in ["enabled", "disabled"]: + full_load = full_load_override == "enabled" + else: + full_load = lowvram_model_memory == 0 + + self.load(device_to, lowvram_model_memory=lowvram_model_memory, force_patch_weights=force_patch_weights, full_load=full_load) + + for k in self.object_patches: + old = 
comfy.utils.set_attr(self.model, k, self.object_patches[k]) + if k not in self.object_patches_backup: + self.object_patches_backup[k] = old + + self.inject_model() + return self.model + +def patched_load_lora_for_models(model, clip, lora, strength_model, strength_clip): + + patch_keys = list(model.object_patches_backup.keys()) + for k in patch_keys: + #print("backing up object patch: ", k) + comfy.utils.set_attr(model.model, k, model.object_patches_backup[k]) + + key_map = {} + if model is not None: + key_map = comfy.lora.model_lora_keys_unet(model.model, key_map) + if clip is not None: + key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map) + + lora = comfy.lora_convert.convert_lora(lora) + loaded = comfy.lora.load_lora(lora, key_map) + #print(temp_object_patches_backup) + + if model is not None: + new_modelpatcher = model.clone() + k = new_modelpatcher.add_patches(loaded, strength_model) + else: + k = () + new_modelpatcher = None + + if clip is not None: + new_clip = clip.clone() + k1 = new_clip.add_patches(loaded, strength_clip) + else: + k1 = () + new_clip = None + k = set(k) + k1 = set(k1) + for x in loaded: + if (x not in k) and (x not in k1): + print("NOT LOADED {}".format(x)) + + if patch_keys: + if hasattr(model.model, "compile_settings"): + compile_settings = getattr(model.model, "compile_settings") + print("compile_settings: ", compile_settings) + for k in patch_keys: + if "diffusion_model." in k: + # Remove the prefix to get the attribute path + key = k.replace('diffusion_model.', '') + attributes = key.split('.') + # Start with the diffusion_model object + block = model.get_model_object("diffusion_model") + # Navigate through the attributes to get to the block + for attr in attributes: + if attr.isdigit(): + block = block[int(attr)] + else: + block = getattr(block, attr) + # Compile the block + compiled_block = torch.compile(block, mode=compile_settings["mode"], dynamic=compile_settings["dynamic"], fullgraph=compile_settings["fullgraph"], backend=compile_settings["backend"]) + # Add the compiled block back as an object patch + model.add_object_patch(k, compiled_block) + return (new_modelpatcher, new_clip) + +class PatchModelPatcherOrder: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "patch_order": (["object_patch_first", "weight_patch_first"], {"default": "weight_patch_first", "tooltip": "Patch the comfy patch_model function to load weight patches (LoRAs) before compiling the model"}), + "full_load": (["enabled", "disabled", "auto"], {"default": "auto", "tooltip": "Disabling may help with memory issues when loading large models, when changing this you should probably force model reload to avoid issues!"}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = "Patch the comfy patch_model function patching order, useful for torch.compile (used as object_patch) as it should come last if you want to use LoRAs with compile" + EXPERIMENTAL = True + + def patch(self, model, patch_order, full_load): + comfy.model_patcher.ModelPatcher.temp_object_patches_backup = {} + setattr(model.model, "full_load_override", full_load) + if patch_order == "weight_patch_first": + comfy.model_patcher.ModelPatcher.patch_model = patched_patch_model + comfy.sd.load_lora_for_models = patched_load_lora_for_models + else: + comfy.model_patcher.ModelPatcher.patch_model = original_patch_model + comfy.sd.load_lora_for_models = original_load_lora_for_models + + return model, + +class 
TorchCompileModelFluxAdvanced: + def __init__(self): + self._compiled = False + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "backend": (["inductor", "cudagraphs"],), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "double_blocks": ("STRING", {"default": "0-18", "multiline": True}), + "single_blocks": ("STRING", {"default": "0-37", "multiline": True}), + "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}), + }, + "optional": { + "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}), + } + } + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + + def parse_blocks(self, blocks_str): + blocks = [] + for part in blocks_str.split(','): + part = part.strip() + if '-' in part: + start, end = map(int, part.split('-')) + blocks.extend(range(start, end + 1)) + else: + blocks.append(int(part)) + return blocks + + def patch(self, model, backend, mode, fullgraph, single_blocks, double_blocks, dynamic, dynamo_cache_size_limit): + single_block_list = self.parse_blocks(single_blocks) + double_block_list = self.parse_blocks(double_blocks) + m = model.clone() + diffusion_model = m.get_model_object("diffusion_model") + torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit + + if not self._compiled: + try: + for i, block in enumerate(diffusion_model.double_blocks): + if i in double_block_list: + #print("Compiling double_block", i) + m.add_object_patch(f"diffusion_model.double_blocks.{i}", torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend)) + for i, block in enumerate(diffusion_model.single_blocks): + if i in single_block_list: + #print("Compiling single block", i) + m.add_object_patch(f"diffusion_model.single_blocks.{i}", torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend)) + self._compiled = True + compile_settings = { + "backend": backend, + "mode": mode, + "fullgraph": fullgraph, + "dynamic": dynamic, + } + setattr(m.model, "compile_settings", compile_settings) + except: + raise RuntimeError("Failed to compile model") + + return (m, ) + # rest of the layers that are not patched + # diffusion_model.final_layer = torch.compile(diffusion_model.final_layer, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.guidance_in = torch.compile(diffusion_model.guidance_in, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.img_in = torch.compile(diffusion_model.img_in, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.time_in = torch.compile(diffusion_model.time_in, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.txt_in = torch.compile(diffusion_model.txt_in, mode=mode, fullgraph=fullgraph, backend=backend) + # diffusion_model.vector_in = torch.compile(diffusion_model.vector_in, mode=mode, fullgraph=fullgraph, backend=backend) + +class TorchCompileModelHyVideo: + def __init__(self): + self._compiled = False + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "backend": (["inductor","cudagraphs"], {"default": "inductor"}), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", 
"reduce-overhead"], {"default": "default"}), + "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}), + "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}), + "compile_single_blocks": ("BOOLEAN", {"default": True, "tooltip": "Compile single blocks"}), + "compile_double_blocks": ("BOOLEAN", {"default": True, "tooltip": "Compile double blocks"}), + "compile_txt_in": ("BOOLEAN", {"default": False, "tooltip": "Compile txt_in layers"}), + "compile_vector_in": ("BOOLEAN", {"default": False, "tooltip": "Compile vector_in layers"}), + "compile_final_layer": ("BOOLEAN", {"default": False, "tooltip": "Compile final layer"}), + + }, + } + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + + def patch(self, model, backend, fullgraph, mode, dynamic, dynamo_cache_size_limit, compile_single_blocks, compile_double_blocks, compile_txt_in, compile_vector_in, compile_final_layer): + m = model.clone() + diffusion_model = m.get_model_object("diffusion_model") + torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit + if not self._compiled: + try: + if compile_single_blocks: + for i, block in enumerate(diffusion_model.single_blocks): + compiled_block = torch.compile(block, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + m.add_object_patch(f"diffusion_model.single_blocks.{i}", compiled_block) + if compile_double_blocks: + for i, block in enumerate(diffusion_model.double_blocks): + compiled_block = torch.compile(block, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + m.add_object_patch(f"diffusion_model.double_blocks.{i}", compiled_block) + if compile_txt_in: + compiled_block = torch.compile(diffusion_model.txt_in, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + m.add_object_patch("diffusion_model.txt_in", compiled_block) + if compile_vector_in: + compiled_block = torch.compile(diffusion_model.vector_in, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + m.add_object_patch("diffusion_model.vector_in", compiled_block) + if compile_final_layer: + compiled_block = torch.compile(diffusion_model.final_layer, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + m.add_object_patch("diffusion_model.final_layer", compiled_block) + self._compiled = True + compile_settings = { + "backend": backend, + "mode": mode, + "fullgraph": fullgraph, + "dynamic": dynamic, + } + setattr(m.model, "compile_settings", compile_settings) + except: + raise RuntimeError("Failed to compile model") + return (m, ) + +class TorchCompileModelWanVideo: + def __init__(self): + self._compiled = False + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "backend": (["inductor","cudagraphs"], {"default": "inductor"}), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}), + "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}), + "compile_transformer_blocks_only": ("BOOLEAN", {"default": False, "tooltip": "Compile only transformer blocks"}), + }, + } + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL 
= True + + def patch(self, model, backend, fullgraph, mode, dynamic, dynamo_cache_size_limit, compile_transformer_blocks_only): + m = model.clone() + diffusion_model = m.get_model_object("diffusion_model") + torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit + is_compiled = hasattr(model.model.diffusion_model.blocks[0], "_orig_mod") + if is_compiled: + logging.info(f"Already compiled, not reapplying") + else: + logging.info(f"Not compiled, applying") + try: + if compile_transformer_blocks_only: + for i, block in enumerate(diffusion_model.blocks): + if is_compiled: + compiled_block = torch.compile(block._orig_mod, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + else: + compiled_block = torch.compile(block, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + m.add_object_patch(f"diffusion_model.blocks.{i}", compiled_block) + else: + compiled_model = torch.compile(diffusion_model, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode) + m.add_object_patch("diffusion_model", compiled_model) + + compile_settings = { + "backend": backend, + "mode": mode, + "fullgraph": fullgraph, + "dynamic": dynamic, + } + setattr(m.model, "compile_settings", compile_settings) + except: + raise RuntimeError("Failed to compile model") + return (m, ) + +class TorchCompileVAE: + def __init__(self): + self._compiled_encoder = False + self._compiled_decoder = False + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "vae": ("VAE",), + "backend": (["inductor", "cudagraphs"],), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "compile_encoder": ("BOOLEAN", {"default": True, "tooltip": "Compile encoder"}), + "compile_decoder": ("BOOLEAN", {"default": True, "tooltip": "Compile decoder"}), + }} + RETURN_TYPES = ("VAE",) + FUNCTION = "compile" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + + def compile(self, vae, backend, mode, fullgraph, compile_encoder, compile_decoder): + if compile_encoder: + if not self._compiled_encoder: + encoder_name = "encoder" + if hasattr(vae.first_stage_model, "taesd_encoder"): + encoder_name = "taesd_encoder" + + try: + setattr( + vae.first_stage_model, + encoder_name, + torch.compile( + getattr(vae.first_stage_model, encoder_name), + mode=mode, + fullgraph=fullgraph, + backend=backend, + ), + ) + self._compiled_encoder = True + except: + raise RuntimeError("Failed to compile model") + if compile_decoder: + if not self._compiled_decoder: + decoder_name = "decoder" + if hasattr(vae.first_stage_model, "taesd_decoder"): + decoder_name = "taesd_decoder" + + try: + setattr( + vae.first_stage_model, + decoder_name, + torch.compile( + getattr(vae.first_stage_model, decoder_name), + mode=mode, + fullgraph=fullgraph, + backend=backend, + ), + ) + self._compiled_decoder = True + except: + raise RuntimeError("Failed to compile model") + return (vae, ) + +class TorchCompileControlNet: + def __init__(self): + self._compiled= False + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "controlnet": ("CONTROL_NET",), + "backend": (["inductor", "cudagraphs"],), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + }} + RETURN_TYPES = ("CONTROL_NET",) + FUNCTION = "compile" + + CATEGORY = "KJNodes/torchcompile" + 
EXPERIMENTAL = True + + def compile(self, controlnet, backend, mode, fullgraph): + if not self._compiled: + try: + # for i, block in enumerate(controlnet.control_model.double_blocks): + # print("Compiling controlnet double_block", i) + # controlnet.control_model.double_blocks[i] = torch.compile(block, mode=mode, fullgraph=fullgraph, backend=backend) + controlnet.control_model = torch.compile(controlnet.control_model, mode=mode, fullgraph=fullgraph, backend=backend) + self._compiled = True + except: + self._compiled = False + raise RuntimeError("Failed to compile model") + + return (controlnet, ) + +class TorchCompileLTXModel: + def __init__(self): + self._compiled = False + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "backend": (["inductor", "cudagraphs"],), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + + def patch(self, model, backend, mode, fullgraph, dynamic): + m = model.clone() + diffusion_model = m.get_model_object("diffusion_model") + + if not self._compiled: + try: + for i, block in enumerate(diffusion_model.transformer_blocks): + compiled_block = torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend) + m.add_object_patch(f"diffusion_model.transformer_blocks.{i}", compiled_block) + self._compiled = True + compile_settings = { + "backend": backend, + "mode": mode, + "fullgraph": fullgraph, + "dynamic": dynamic, + } + setattr(m.model, "compile_settings", compile_settings) + + except: + raise RuntimeError("Failed to compile model") + + return (m, ) + +class TorchCompileCosmosModel: + def __init__(self): + self._compiled = False + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "backend": (["inductor", "cudagraphs"],), + "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}), + "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}), + "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}), + "dynamo_cache_size_limit": ("INT", {"default": 64, "tooltip": "Set the dynamo cache size limit"}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/torchcompile" + EXPERIMENTAL = True + + def patch(self, model, backend, mode, fullgraph, dynamic, dynamo_cache_size_limit): + + m = model.clone() + diffusion_model = m.get_model_object("diffusion_model") + torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit + + if not self._compiled: + try: + for name, block in diffusion_model.blocks.items(): + #print(f"Compiling block {name}") + compiled_block = torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend) + m.add_object_patch(f"diffusion_model.blocks.{name}", compiled_block) + #diffusion_model.blocks[name] = compiled_block + + self._compiled = True + compile_settings = { + "backend": backend, + "mode": mode, + "fullgraph": fullgraph, + "dynamic": dynamic, + } + setattr(m.model, "compile_settings", compile_settings) + + except: + raise RuntimeError("Failed to compile model") + + return (m, ) + + +#teacache + +try: + from comfy.ldm.wan.model import sinusoidal_embedding_1d +except: + 
pass +from einops import repeat +from unittest.mock import patch +from contextlib import nullcontext +import numpy as np + +def relative_l1_distance(last_tensor, current_tensor): + l1_distance = torch.abs(last_tensor - current_tensor).mean() + norm = torch.abs(last_tensor).mean() + relative_l1_distance = l1_distance / norm + return relative_l1_distance.to(torch.float32) + +def teacache_wanvideo_forward_orig(self, x, t, context, clip_fea=None, freqs=None, transformer_options={}, **kwargs): + # embeddings + x = self.patch_embedding(x.float()).to(x.dtype) + grid_sizes = x.shape[2:] + x = x.flatten(2).transpose(1, 2) + + # time embeddings + e = self.time_embedding( + sinusoidal_embedding_1d(self.freq_dim, t).to(dtype=x[0].dtype)) + e0 = self.time_projection(e).unflatten(1, (6, self.dim)) + + # context + context = self.text_embedding(context) + if clip_fea is not None and self.img_emb is not None: + context_clip = self.img_emb(clip_fea) # bs x 257 x dim + context = torch.concat([context_clip, context], dim=1) + + @torch.compiler.disable() + def tea_cache(x, e0, e, kwargs): + #teacache for cond and uncond separately + rel_l1_thresh = transformer_options["rel_l1_thresh"] + + is_cond = True if transformer_options["cond_or_uncond"] == [0] else False + + should_calc = True + suffix = "cond" if is_cond else "uncond" + + # Init cache dict if not exists + if not hasattr(self, 'teacache_state'): + self.teacache_state = { + 'cond': {'accumulated_rel_l1_distance': 0, 'prev_input': None, + 'teacache_skipped_steps': 0, 'previous_residual': None}, + 'uncond': {'accumulated_rel_l1_distance': 0, 'prev_input': None, + 'teacache_skipped_steps': 0, 'previous_residual': None} + } + logging.info("\nTeaCache: Initialized") + + cache = self.teacache_state[suffix] + + if cache['prev_input'] is not None: + if transformer_options["coefficients"] == []: + temb_relative_l1 = relative_l1_distance(cache['prev_input'], e0) + curr_acc_dist = cache['accumulated_rel_l1_distance'] + temb_relative_l1 + else: + rescale_func = np.poly1d(transformer_options["coefficients"]) + curr_acc_dist = cache['accumulated_rel_l1_distance'] + rescale_func(((e-cache['prev_input']).abs().mean() / cache['prev_input'].abs().mean()).cpu().item()) + try: + if curr_acc_dist < rel_l1_thresh: + should_calc = False + cache['accumulated_rel_l1_distance'] = curr_acc_dist + else: + should_calc = True + cache['accumulated_rel_l1_distance'] = 0 + except: + should_calc = True + cache['accumulated_rel_l1_distance'] = 0 + + if transformer_options["coefficients"] == []: + cache['prev_input'] = e0.clone().detach() + else: + cache['prev_input'] = e.clone().detach() + + if not should_calc: + x += cache['previous_residual'].to(x.device) + cache['teacache_skipped_steps'] += 1 + #print(f"TeaCache: Skipping {suffix} step") + return should_calc, cache + + if not transformer_options: + raise RuntimeError("Can't access transformer_options, this requires ComfyUI nightly version from Mar 14, 2025 or later") + + teacache_enabled = transformer_options.get("teacache_enabled", False) + if not teacache_enabled: + should_calc = True + else: + should_calc, cache = tea_cache(x, e0, e, kwargs) + + if should_calc: + original_x = x.clone().detach() + patches_replace = transformer_options.get("patches_replace", {}) + blocks_replace = patches_replace.get("dit", {}) + for i, block in enumerate(self.blocks): + if ("double_block", i) in blocks_replace: + def block_wrap(args): + out = {} + out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"]) + return out + out 
= blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs}, {"original_block": block_wrap, "transformer_options": transformer_options}) + x = out["img"] + else: + x = block(x, e=e0, freqs=freqs, context=context) + + if teacache_enabled: + cache['previous_residual'] = (x - original_x).to(transformer_options["teacache_device"]) + + # head + x = self.head(x, e) + + # unpatchify + x = self.unpatchify(x, grid_sizes) + return x + +class WanVideoTeaCacheKJ: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "rel_l1_thresh": ("FLOAT", {"default": 0.275, "min": 0.0, "max": 10.0, "step": 0.001, "tooltip": "Threshold for to determine when to apply the cache, compromise between speed and accuracy. When using coefficients a good value range is something between 0.2-0.4 for all but 1.3B model, which should be about 10 times smaller, same as when not using coefficients."}), + "start_percent": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The start percentage of the steps to use with TeaCache."}), + "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The end percentage of the steps to use with TeaCache."}), + "cache_device": (["main_device", "offload_device"], {"default": "offload_device", "tooltip": "Device to cache to"}), + "coefficients": (["disabled", "1.3B", "14B", "i2v_480", "i2v_720"], {"default": "i2v_480", "tooltip": "Coefficients for rescaling the relative l1 distance, if disabled the threshold value should be about 10 times smaller than the value used with coefficients."}), + } + } + + RETURN_TYPES = ("MODEL",) + RETURN_NAMES = ("model",) + FUNCTION = "patch_teacache" + CATEGORY = "KJNodes/teacache" + DESCRIPTION = """ +Patch WanVideo model to use TeaCache. Speeds up inference by caching the output and +applying it instead of doing the step. Best results are achieved by choosing the +appropriate coefficients for the model. Early steps should never be skipped, with too +aggressive values this can happen and the motion suffers. Starting later can help with that too. +When NOT using coefficients, the threshold value should be +about 10 times smaller than the value used with coefficients. + +Official recommended values https://github.com/ali-vilab/TeaCache/tree/main/TeaCache4Wan2.1: + + +
    ++-------------------+--------+---------+--------+
    +|       Model       |  Low   | Medium  |  High  |
    ++-------------------+--------+---------+--------+
    +| Wan2.1 t2v 1.3B   |  0.05  |  0.07   |  0.08  |
    +| Wan2.1 t2v 14B    |  0.14  |  0.15   |  0.20  |
    +| Wan2.1 i2v 480P   |  0.13  |  0.19   |  0.26  |
    +| Wan2.1 i2v 720P   |  0.18  |  0.20   |  0.30  |
    ++-------------------+--------+---------+--------+
    +
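    +For example, with the i2v_480 coefficients the table's "Medium" value (0.19) is a reasonable
    +rel_l1_thresh; with coefficients disabled an equivalent setting would be roughly 10x smaller, around 0.02.
    +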
    +""" + EXPERIMENTAL = True + + def patch_teacache(self, model, rel_l1_thresh, start_percent, end_percent, cache_device, coefficients): + if rel_l1_thresh == 0: + return (model,) + + if coefficients == "disabled" and rel_l1_thresh > 0.1: + logging.warning("Threshold value is too high for TeaCache without coefficients, consider using coefficients for better results.") + if coefficients != "disabled" and rel_l1_thresh < 0.1 and "1.3B" not in coefficients: + logging.warning("Threshold value is too low for TeaCache with coefficients, consider using higher threshold value for better results.") + + # type_str = str(type(model.model.model_config).__name__) + #if model.model.diffusion_model.dim == 1536: + # model_type ="1.3B" + # else: + # if "WAN21_T2V" in type_str: + # model_type = "14B" + # elif "WAN21_I2V" in type_str: + # model_type = "i2v_480" + # else: + # model_type = "i2v_720" #how to detect this? + + + teacache_coefficients_map = { + "disabled": [], + "1.3B": [2.39676752e+03, -1.31110545e+03, 2.01331979e+02, -8.29855975e+00, 1.37887774e-01], + "14B": [-5784.54975374, 5449.50911966, -1811.16591783, 256.27178429, -13.02252404], + "i2v_480": [-3.02331670e+02, 2.23948934e+02, -5.25463970e+01, 5.87348440e+00, -2.01973289e-01], + "i2v_720": [-114.36346466, 65.26524496, -18.82220707, 4.91518089, -0.23412683], + } + coefficients = teacache_coefficients_map[coefficients] + + teacache_device = mm.get_torch_device() if cache_device == "main_device" else mm.unet_offload_device() + + model_clone = model.clone() + if 'transformer_options' not in model_clone.model_options: + model_clone.model_options['transformer_options'] = {} + model_clone.model_options["transformer_options"]["rel_l1_thresh"] = rel_l1_thresh + model_clone.model_options["transformer_options"]["teacache_device"] = teacache_device + model_clone.model_options["transformer_options"]["coefficients"] = coefficients + diffusion_model = model_clone.get_model_object("diffusion_model") + + def outer_wrapper(start_percent, end_percent): + def unet_wrapper_function(model_function, kwargs): + input = kwargs["input"] + timestep = kwargs["timestep"] + c = kwargs["c"] + sigmas = c["transformer_options"]["sample_sigmas"] + cond_or_uncond = kwargs["cond_or_uncond"] + last_step = (len(sigmas) - 1) + + matched_step_index = (sigmas == timestep[0] ).nonzero() + if len(matched_step_index) > 0: + current_step_index = matched_step_index.item() + else: + for i in range(len(sigmas) - 1): + # walk from beginning of steps until crossing the timestep + if (sigmas[i] - timestep[0]) * (sigmas[i + 1] - timestep[0]) <= 0: + current_step_index = i + break + else: + current_step_index = 0 + + if current_step_index == 0: + if hasattr(diffusion_model, "teacache_state"): + delattr(diffusion_model, "teacache_state") + logging.info("\nResetting TeaCache state") + + current_percent = current_step_index / (len(sigmas) - 1) + c["transformer_options"]["current_percent"] = current_percent + if start_percent <= current_percent <= end_percent: + c["transformer_options"]["teacache_enabled"] = True + + context = patch.multiple( + diffusion_model, + forward_orig=teacache_wanvideo_forward_orig.__get__(diffusion_model, diffusion_model.__class__) + ) + + with context: + out = model_function(input, timestep, **c) + if current_step_index+1 == last_step and hasattr(diffusion_model, "teacache_state"): + if len(cond_or_uncond) == 1 and cond_or_uncond[0] == 0: + skipped_steps_cond = diffusion_model.teacache_state["cond"]["teacache_skipped_steps"] + skipped_steps_uncond = 
diffusion_model.teacache_state["uncond"]["teacache_skipped_steps"] + logging.info("-----------------------------------") + logging.info(f"TeaCache skipped:") + logging.info(f"{skipped_steps_cond} cond steps") + logging.info(f"{skipped_steps_uncond} uncond step") + logging.info(f"out of {last_step} steps") + logging.info("-----------------------------------") + elif len(cond_or_uncond) == 2: + skipped_steps_cond = diffusion_model.teacache_state["uncond"]["teacache_skipped_steps"] + logging.info("-----------------------------------") + logging.info(f"TeaCache skipped:") + logging.info(f"{skipped_steps_cond} cond steps") + logging.info(f"out of {last_step} steps") + logging.info("-----------------------------------") + + return out + return unet_wrapper_function + + model_clone.set_model_unet_function_wrapper(outer_wrapper(start_percent=start_percent, end_percent=end_percent)) + + return (model_clone,) + + + +from comfy.ldm.modules.attention import optimized_attention +from comfy.ldm.flux.math import apply_rope + +def modified_wan_self_attention_forward(self, x, freqs): + r""" + Args: + x(Tensor): Shape [B, L, num_heads, C / num_heads] + freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2] + """ + b, s, n, d = *x.shape[:2], self.num_heads, self.head_dim + + # query, key, value function + def qkv_fn(x): + q = self.norm_q(self.q(x)).view(b, s, n, d) + k = self.norm_k(self.k(x)).view(b, s, n, d) + v = self.v(x).view(b, s, n * d) + return q, k, v + + q, k, v = qkv_fn(x) + + q, k = apply_rope(q, k, freqs) + + feta_scores = get_feta_scores(q, k, self.num_frames, self.enhance_weight) + + x = optimized_attention( + q.view(b, s, n * d), + k.view(b, s, n * d), + v, + heads=self.num_heads, + ) + + x = self.o(x) + + x *= feta_scores + + return x + +from einops import rearrange +def get_feta_scores(query, key, num_frames, enhance_weight): + img_q, img_k = query, key #torch.Size([2, 9216, 12, 128]) + + _, ST, num_heads, head_dim = img_q.shape + spatial_dim = ST / num_frames + spatial_dim = int(spatial_dim) + + query_image = rearrange( + img_q, "B (T S) N C -> (B S) N T C", T=num_frames, S=spatial_dim, N=num_heads, C=head_dim + ) + key_image = rearrange( + img_k, "B (T S) N C -> (B S) N T C", T=num_frames, S=spatial_dim, N=num_heads, C=head_dim + ) + + return feta_score(query_image, key_image, head_dim, num_frames, enhance_weight) + +def feta_score(query_image, key_image, head_dim, num_frames, enhance_weight): + scale = head_dim**-0.5 + query_image = query_image * scale + attn_temp = query_image @ key_image.transpose(-2, -1) # translate attn to float32 + attn_temp = attn_temp.to(torch.float32) + attn_temp = attn_temp.softmax(dim=-1) + + # Reshape to [batch_size * num_tokens, num_frames, num_frames] + attn_temp = attn_temp.reshape(-1, num_frames, num_frames) + + # Create a mask for diagonal elements + diag_mask = torch.eye(num_frames, device=attn_temp.device).bool() + diag_mask = diag_mask.unsqueeze(0).expand(attn_temp.shape[0], -1, -1) + + # Zero out diagonal elements + attn_wo_diag = attn_temp.masked_fill(diag_mask, 0) + + # Calculate mean for each token's attention matrix + # Number of off-diagonal elements per matrix is n*n - n + num_off_diag = num_frames * num_frames - num_frames + mean_scores = attn_wo_diag.sum(dim=(1, 2)) / num_off_diag + + enhance_scores = mean_scores.mean() * (num_frames + enhance_weight) + enhance_scores = enhance_scores.clamp(min=1) + return enhance_scores + +import types +class WanAttentionPatch: + def __init__(self, num_frames, weight): + self.num_frames = num_frames + 
self.enhance_weight = weight + + def __get__(self, obj, objtype=None): + # Create bound method with stored parameters + def wrapped_attention(self_module, *args, **kwargs): + self_module.num_frames = self.num_frames + self_module.enhance_weight = self.enhance_weight + return modified_wan_self_attention_forward(self_module, *args, **kwargs) + return types.MethodType(wrapped_attention, obj) + +class WanVideoEnhanceAVideoKJ: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "latent": ("LATENT", {"tooltip": "Only used to get the latent count"}), + "weight": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 10.0, "step": 0.001, "tooltip": "Strength of the enhance effect"}), + } + } + + RETURN_TYPES = ("MODEL",) + RETURN_NAMES = ("model",) + FUNCTION = "enhance" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = "https://github.com/NUS-HPC-AI-Lab/Enhance-A-Video" + EXPERIMENTAL = True + + def enhance(self, model, weight, latent): + if weight == 0: + return (model,) + + num_frames = latent["samples"].shape[2] + + model_clone = model.clone() + if 'transformer_options' not in model_clone.model_options: + model_clone.model_options['transformer_options'] = {} + model_clone.model_options["transformer_options"]["enhance_weight"] = weight + diffusion_model = model_clone.get_model_object("diffusion_model") + + compile_settings = getattr(model.model, "compile_settings", None) + for idx, block in enumerate(diffusion_model.blocks): + patched_attn = WanAttentionPatch(num_frames, weight).__get__(block.self_attn, block.__class__) + if compile_settings is not None: + patched_attn = torch.compile(patched_attn, mode=compile_settings["mode"], dynamic=compile_settings["dynamic"], fullgraph=compile_settings["fullgraph"], backend=compile_settings["backend"]) + + model_clone.add_object_patch(f"diffusion_model.blocks.{idx}.self_attn.forward", patched_attn) + + return (model_clone,) + +class SkipLayerGuidanceWanVideo: + @classmethod + def INPUT_TYPES(s): + return {"required": {"model": ("MODEL", ), + "blocks": ("STRING", {"default": "10", "multiline": False}), + "start_percent": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "slg" + EXPERIMENTAL = True + DESCRIPTION = "Simplified skip layer guidance that only skips the uncond on selected blocks" + + CATEGORY = "advanced/guidance" + + def slg(self, model, start_percent, end_percent, blocks): + def skip(args, extra_args): + transformer_options = extra_args.get("transformer_options", {}) + original_block = extra_args["original_block"] + + if not transformer_options: + raise ValueError("transformer_options not found in extra_args, currently SkipLayerGuidanceWanVideo only works with TeaCacheKJ") + if start_percent <= transformer_options["current_percent"] <= end_percent: + if args["img"].shape[0] == 2: + prev_img_uncond = args["img"][0].unsqueeze(0) + + new_args = { + "img": args["img"][1], + "txt": args["txt"][1], + "vec": args["vec"][1], + "pe": args["pe"][1] + } + + block_out = original_block(new_args) + + out = { + "img": torch.cat([prev_img_uncond, block_out["img"]], dim=0), + "txt": args["txt"], + "vec": args["vec"], + "pe": args["pe"] + } + else: + if transformer_options.get("cond_or_uncond") == [0]: + out = original_block(args) + else: + out = args + else: + out = original_block(args) + return out + + block_list = [int(x.strip()) for x in blocks.split(",")] + blocks = [int(i) for i 
in block_list] + logging.info(f"Selected blocks to skip uncond on: {blocks}") + + m = model.clone() + + for b in blocks: + #m.set_model_patch_replace(skip, "dit", "double_block", b) + model_options = m.model_options["transformer_options"].copy() + if "patches_replace" not in model_options: + model_options["patches_replace"] = {} + else: + model_options["patches_replace"] = model_options["patches_replace"].copy() + + if "dit" not in model_options["patches_replace"]: + model_options["patches_replace"]["dit"] = {} + else: + model_options["patches_replace"]["dit"] = model_options["patches_replace"]["dit"].copy() + + block = ("double_block", b) + + model_options["patches_replace"]["dit"][block] = skip + m.model_options["transformer_options"] = model_options + + + return (m, ) \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/nodes/nodes.py b/custom_nodes/ComfyUI-KJNodes-main/nodes/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..719ae40e03ded10d162e181f481b3dbe22f4a243 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/nodes/nodes.py @@ -0,0 +1,2728 @@ +import torch +import torch.nn as nn +import numpy as np +from PIL import Image +from typing import Union +import json, re, os, io, time, platform +import re +import importlib + +import model_management +import folder_paths +from nodes import MAX_RESOLUTION +from comfy.utils import common_upscale, ProgressBar, load_torch_file + +script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +folder_paths.add_model_folder_path("kjnodes_fonts", os.path.join(script_directory, "fonts")) + +class AnyType(str): + """A special class that is always equal in not equal comparisons. Credit to pythongosssss""" + + def __ne__(self, __value: object) -> bool: + return False +any = AnyType("*") + +class BOOLConstant: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "value": ("BOOLEAN", {"default": True}), + }, + } + RETURN_TYPES = ("BOOLEAN",) + RETURN_NAMES = ("value",) + FUNCTION = "get_value" + CATEGORY = "KJNodes/constants" + + def get_value(self, value): + return (value,) + +class INTConstant: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "value": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + } + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("value",) + FUNCTION = "get_value" + CATEGORY = "KJNodes/constants" + + def get_value(self, value): + return (value,) + +class FloatConstant: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "value": ("FLOAT", {"default": 0.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.00001}), + }, + } + + RETURN_TYPES = ("FLOAT",) + RETURN_NAMES = ("value",) + FUNCTION = "get_value" + CATEGORY = "KJNodes/constants" + + def get_value(self, value): + return (value,) + +class StringConstant: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "string": ("STRING", {"default": '', "multiline": False}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "passtring" + CATEGORY = "KJNodes/constants" + + def passtring(self, string): + return (string, ) + +class StringConstantMultiline: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "string": ("STRING", {"default": "", "multiline": True}), + "strip_newlines": ("BOOLEAN", {"default": True}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "stringify" + CATEGORY = "KJNodes/constants" + + def stringify(self, string, strip_newlines): + new_string = [] + for line in io.StringIO(string): + if not 
line.strip().startswith("\n") and strip_newlines: + line = line.replace("\n", '') + new_string.append(line) + new_string = "\n".join(new_string) + + return (new_string, ) + + + +class ScaleBatchPromptSchedule: + + RETURN_TYPES = ("STRING",) + FUNCTION = "scaleschedule" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ +Scales a batch schedule from Fizz' nodes BatchPromptSchedule +to a different frame count. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input_str": ("STRING", {"forceInput": True,"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n"}), + "old_frame_count": ("INT", {"forceInput": True,"default": 1,"min": 1, "max": 4096, "step": 1}), + "new_frame_count": ("INT", {"forceInput": True,"default": 1,"min": 1, "max": 4096, "step": 1}), + + }, + } + + def scaleschedule(self, old_frame_count, input_str, new_frame_count): + pattern = r'"(\d+)"\s*:\s*"(.*?)"(?:,|\Z)' + frame_strings = dict(re.findall(pattern, input_str)) + + # Calculate the scaling factor + scaling_factor = (new_frame_count - 1) / (old_frame_count - 1) + + # Initialize a dictionary to store the new frame numbers and strings + new_frame_strings = {} + + # Iterate over the frame numbers and strings + for old_frame, string in frame_strings.items(): + # Calculate the new frame number + new_frame = int(round(int(old_frame) * scaling_factor)) + + # Store the new frame number and corresponding string + new_frame_strings[new_frame] = string + + # Format the output string + output_str = ', '.join([f'"{k}":"{v}"' for k, v in sorted(new_frame_strings.items())]) + return (output_str,) + + +class GetLatentsFromBatchIndexed: + + RETURN_TYPES = ("LATENT",) + FUNCTION = "indexedlatentsfrombatch" + CATEGORY = "KJNodes/latents" + DESCRIPTION = """ +Selects and returns the latents at the specified indices as an latent batch. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "latents": ("LATENT",), + "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), + "latent_format": (["BCHW", "BTCHW", "BCTHW"], {"default": "BCHW"}), + }, + } + + def indexedlatentsfrombatch(self, latents, indexes, latent_format): + + samples = latents.copy() + latent_samples = samples["samples"] + + # Parse the indexes string into a list of integers + index_list = [int(index.strip()) for index in indexes.split(',')] + + # Convert list of indices to a PyTorch tensor + indices_tensor = torch.tensor(index_list, dtype=torch.long) + + # Select the latents at the specified indices + if latent_format == "BCHW": + chosen_latents = latent_samples[indices_tensor] + elif latent_format == "BTCHW": + chosen_latents = latent_samples[:, indices_tensor] + elif latent_format == "BCTHW": + chosen_latents = latent_samples[:, :, indices_tensor] + + samples["samples"] = chosen_latents + return (samples,) + + +class ConditioningMultiCombine: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 20, "step": 1}), + "operation": (["combine", "concat"], {"default": "combine"}), + "conditioning_1": ("CONDITIONING", ), + "conditioning_2": ("CONDITIONING", ), + }, + } + + RETURN_TYPES = ("CONDITIONING", "INT") + RETURN_NAMES = ("combined", "inputcount") + FUNCTION = "combine" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Combines multiple conditioning nodes into one +""" + + def combine(self, inputcount, operation, **kwargs): + from nodes import ConditioningCombine + from nodes import ConditioningConcat + cond_combine_node = ConditioningCombine() + cond_concat_node = ConditioningConcat() + cond = kwargs["conditioning_1"] + for c in range(1, inputcount): + new_cond = kwargs[f"conditioning_{c + 1}"] + if operation == "combine": + cond = cond_combine_node.combine(new_cond, cond)[0] + elif operation == "concat": + cond = cond_concat_node.concat(cond, new_cond)[0] + return (cond, inputcount,) + +class AppendStringsToList: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "string1": ("STRING", {"default": '', "forceInput": True}), + "string2": ("STRING", {"default": '', "forceInput": True}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "joinstring" + CATEGORY = "KJNodes/text" + + def joinstring(self, string1, string2): + if not isinstance(string1, list): + string1 = [string1] + if not isinstance(string2, list): + string2 = [string2] + + joined_string = string1 + string2 + return (joined_string, ) + +class JoinStrings: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "string1": ("STRING", {"default": '', "forceInput": True}), + "string2": ("STRING", {"default": '', "forceInput": True}), + "delimiter": ("STRING", {"default": ' ', "multiline": False}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "joinstring" + CATEGORY = "KJNodes/text" + + def joinstring(self, string1, string2, delimiter): + joined_string = string1 + delimiter + string2 + return (joined_string, ) + +class JoinStringMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "string_1": ("STRING", {"default": '', "forceInput": True}), + "string_2": ("STRING", {"default": '', "forceInput": True}), + "delimiter": ("STRING", {"default": ' ', "multiline": False}), + "return_list": ("BOOLEAN", {"default": False}), + }, + } + + RETURN_TYPES = ("STRING",) + 
RETURN_NAMES = ("string",) + FUNCTION = "combine" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Creates single string, or a list of strings, from +multiple input strings. +You can set how many inputs the node has, +with the **inputcount** and clicking update. +""" + + def combine(self, inputcount, delimiter, **kwargs): + string = kwargs["string_1"] + return_list = kwargs["return_list"] + strings = [string] # Initialize a list with the first string + for c in range(1, inputcount): + new_string = kwargs[f"string_{c + 1}"] + if return_list: + strings.append(new_string) # Add new string to the list + else: + string = string + delimiter + new_string + if return_list: + return (strings,) # Return the list of strings + else: + return (string,) # Return the combined string + +class CondPassThrough: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + }, + "optional": { + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + }, + } + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING",) + RETURN_NAMES = ("positive", "negative") + FUNCTION = "passthrough" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ + Simply passes through the positive and negative conditioning, + workaround for Set node not allowing bypassed inputs. +""" + + def passthrough(self, positive=None, negative=None): + return (positive, negative,) + +class ModelPassThrough: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + }, + "optional": { + "model": ("MODEL", ), + }, + } + + RETURN_TYPES = ("MODEL", ) + RETURN_NAMES = ("model",) + FUNCTION = "passthrough" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ + Simply passes through the model, + workaround for Set node not allowing bypassed inputs. +""" + + def passthrough(self, model=None): + return (model,) + +def append_helper(t, mask, c, set_area_to_bounds, strength): + n = [t[0], t[1].copy()] + _, h, w = mask.shape + n[1]['mask'] = mask + n[1]['set_area_to_bounds'] = set_area_to_bounds + n[1]['mask_strength'] = strength + c.append(n) + +class ConditioningSetMaskAndCombine: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "positive_1": ("CONDITIONING", ), + "negative_1": ("CONDITIONING", ), + "positive_2": ("CONDITIONING", ), + "negative_2": ("CONDITIONING", ), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), + } + } + + RETURN_TYPES = ("CONDITIONING","CONDITIONING",) + RETURN_NAMES = ("combined_positive", "combined_negative",) + FUNCTION = "append" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes +""" + + def append(self, positive_1, negative_1, positive_2, negative_2, mask_1, mask_2, set_cond_area, mask_1_strength, mask_2_strength): + c = [] + c2 = [] + set_area_to_bounds = False + if set_cond_area != "default": + set_area_to_bounds = True + if len(mask_1.shape) < 3: + mask_1 = mask_1.unsqueeze(0) + if len(mask_2.shape) < 3: + mask_2 = mask_2.unsqueeze(0) + for t in positive_1: + append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) + for t in positive_2: + append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) + for t in negative_1: + append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) + for t in negative_2: + append_helper(t, mask_2, c2, 
set_area_to_bounds, mask_2_strength) + return (c, c2) + +class ConditioningSetMaskAndCombine3: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "positive_1": ("CONDITIONING", ), + "negative_1": ("CONDITIONING", ), + "positive_2": ("CONDITIONING", ), + "negative_2": ("CONDITIONING", ), + "positive_3": ("CONDITIONING", ), + "negative_3": ("CONDITIONING", ), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + "mask_3": ("MASK", ), + "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), + } + } + + RETURN_TYPES = ("CONDITIONING","CONDITIONING",) + RETURN_NAMES = ("combined_positive", "combined_negative",) + FUNCTION = "append" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes +""" + + def append(self, positive_1, negative_1, positive_2, positive_3, negative_2, negative_3, mask_1, mask_2, mask_3, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength): + c = [] + c2 = [] + set_area_to_bounds = False + if set_cond_area != "default": + set_area_to_bounds = True + if len(mask_1.shape) < 3: + mask_1 = mask_1.unsqueeze(0) + if len(mask_2.shape) < 3: + mask_2 = mask_2.unsqueeze(0) + if len(mask_3.shape) < 3: + mask_3 = mask_3.unsqueeze(0) + for t in positive_1: + append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) + for t in positive_2: + append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) + for t in positive_3: + append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength) + for t in negative_1: + append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) + for t in negative_2: + append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) + for t in negative_3: + append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength) + return (c, c2) + +class ConditioningSetMaskAndCombine4: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "positive_1": ("CONDITIONING", ), + "negative_1": ("CONDITIONING", ), + "positive_2": ("CONDITIONING", ), + "negative_2": ("CONDITIONING", ), + "positive_3": ("CONDITIONING", ), + "negative_3": ("CONDITIONING", ), + "positive_4": ("CONDITIONING", ), + "negative_4": ("CONDITIONING", ), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + "mask_3": ("MASK", ), + "mask_4": ("MASK", ), + "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_4_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), + } + } + + RETURN_TYPES = ("CONDITIONING","CONDITIONING",) + RETURN_NAMES = ("combined_positive", "combined_negative",) + FUNCTION = "append" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes +""" + + def append(self, positive_1, negative_1, positive_2, positive_3, positive_4, negative_2, negative_3, negative_4, mask_1, mask_2, mask_3, mask_4, set_cond_area, mask_1_strength, mask_2_strength, 
mask_3_strength, mask_4_strength): + c = [] + c2 = [] + set_area_to_bounds = False + if set_cond_area != "default": + set_area_to_bounds = True + if len(mask_1.shape) < 3: + mask_1 = mask_1.unsqueeze(0) + if len(mask_2.shape) < 3: + mask_2 = mask_2.unsqueeze(0) + if len(mask_3.shape) < 3: + mask_3 = mask_3.unsqueeze(0) + if len(mask_4.shape) < 3: + mask_4 = mask_4.unsqueeze(0) + for t in positive_1: + append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) + for t in positive_2: + append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) + for t in positive_3: + append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength) + for t in positive_4: + append_helper(t, mask_4, c, set_area_to_bounds, mask_4_strength) + for t in negative_1: + append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) + for t in negative_2: + append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) + for t in negative_3: + append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength) + for t in negative_4: + append_helper(t, mask_4, c2, set_area_to_bounds, mask_4_strength) + return (c, c2) + +class ConditioningSetMaskAndCombine5: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "positive_1": ("CONDITIONING", ), + "negative_1": ("CONDITIONING", ), + "positive_2": ("CONDITIONING", ), + "negative_2": ("CONDITIONING", ), + "positive_3": ("CONDITIONING", ), + "negative_3": ("CONDITIONING", ), + "positive_4": ("CONDITIONING", ), + "negative_4": ("CONDITIONING", ), + "positive_5": ("CONDITIONING", ), + "negative_5": ("CONDITIONING", ), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + "mask_3": ("MASK", ), + "mask_4": ("MASK", ), + "mask_5": ("MASK", ), + "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_4_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_5_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), + } + } + + RETURN_TYPES = ("CONDITIONING","CONDITIONING",) + RETURN_NAMES = ("combined_positive", "combined_negative",) + FUNCTION = "append" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes +""" + + def append(self, positive_1, negative_1, positive_2, positive_3, positive_4, positive_5, negative_2, negative_3, negative_4, negative_5, mask_1, mask_2, mask_3, mask_4, mask_5, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength, mask_4_strength, mask_5_strength): + c = [] + c2 = [] + set_area_to_bounds = False + if set_cond_area != "default": + set_area_to_bounds = True + if len(mask_1.shape) < 3: + mask_1 = mask_1.unsqueeze(0) + if len(mask_2.shape) < 3: + mask_2 = mask_2.unsqueeze(0) + if len(mask_3.shape) < 3: + mask_3 = mask_3.unsqueeze(0) + if len(mask_4.shape) < 3: + mask_4 = mask_4.unsqueeze(0) + if len(mask_5.shape) < 3: + mask_5 = mask_5.unsqueeze(0) + for t in positive_1: + append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) + for t in positive_2: + append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) + for t in positive_3: + append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength) + for t in positive_4: + append_helper(t, mask_4, c, 
set_area_to_bounds, mask_4_strength) + for t in positive_5: + append_helper(t, mask_5, c, set_area_to_bounds, mask_5_strength) + for t in negative_1: + append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) + for t in negative_2: + append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) + for t in negative_3: + append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength) + for t in negative_4: + append_helper(t, mask_4, c2, set_area_to_bounds, mask_4_strength) + for t in negative_5: + append_helper(t, mask_5, c2, set_area_to_bounds, mask_5_strength) + return (c, c2) + +class VRAM_Debug: + + @classmethod + + def INPUT_TYPES(s): + return { + "required": { + + "empty_cache": ("BOOLEAN", {"default": True}), + "gc_collect": ("BOOLEAN", {"default": True}), + "unload_all_models": ("BOOLEAN", {"default": False}), + }, + "optional": { + "any_input": (any, {}), + "image_pass": ("IMAGE",), + "model_pass": ("MODEL",), + } + } + + RETURN_TYPES = (any, "IMAGE","MODEL","INT", "INT",) + RETURN_NAMES = ("any_output", "image_pass", "model_pass", "freemem_before", "freemem_after") + FUNCTION = "VRAMdebug" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ +Returns the inputs unchanged; they are only used as triggers. +Performs ComfyUI model management functions and garbage collection, +and reports the free VRAM before and after the operations. +""" + + def VRAMdebug(self, gc_collect, empty_cache, unload_all_models, image_pass=None, model_pass=None, any_input=None): + freemem_before = model_management.get_free_memory() + print("VRAMdebug: free memory before: ", f"{freemem_before:,.0f}") + if empty_cache: + model_management.soft_empty_cache() + if unload_all_models: + model_management.unload_all_models() + if gc_collect: + import gc + gc.collect() + freemem_after = model_management.get_free_memory() + print("VRAMdebug: free memory after: ", f"{freemem_after:,.0f}") + print("VRAMdebug: freed memory: ", f"{freemem_after - freemem_before:,.0f}") + return {"ui": { + "text": [f"{freemem_before:,.0f}x{freemem_after:,.0f}"]}, + "result": (any_input, image_pass, model_pass, freemem_before, freemem_after) + } + +class SomethingToString: + @classmethod + + def INPUT_TYPES(s): + return { + "required": { + "input": (any, {}), + }, + "optional": { + "prefix": ("STRING", {"default": ""}), + "suffix": ("STRING", {"default": ""}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "stringify" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Converts any type to a string. +""" + + def stringify(self, input, prefix="", suffix=""): + if isinstance(input, (int, float, bool)): + stringified = str(input) + elif isinstance(input, list): + stringified = ', '.join(str(item) for item in input) + else: + stringified = str(input) # fall back to str() so plain strings and other types are not silently dropped + if prefix: # Check if prefix is not empty + stringified = prefix + stringified # Add the prefix + if suffix: # Check if suffix is not empty + stringified = stringified + suffix # Add the suffix + + return (stringified,) + +class Sleep: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input": (any, {}), + "minutes": ("INT", {"default": 0, "min": 0, "max": 1439}), + "seconds": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 59.99, "step": 0.01}), + }, + } + RETURN_TYPES = (any,) + FUNCTION = "sleepdelay" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ +Delays the execution for the input amount of time.
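For illustration, a self-contained sketch of the delay logic (an assumption-level example, not the node itself): minutes and seconds are summed, handed to time.sleep, and the input is returned untouched.

import time

def sleep_delay(value, minutes: int = 0, seconds: float = 0.0):
    # Block for the requested time, then pass the value through unchanged.
    time.sleep(minutes * 60 + seconds)
    return value

print(sleep_delay("done", seconds=0.1))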
+""" + + def sleepdelay(self, input, minutes, seconds): + total_seconds = minutes * 60 + seconds + time.sleep(total_seconds) + return input, + +class EmptyLatentImagePresets: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "dimensions": ( + [ + '512 x 512 (1:1)', + '768 x 512 (1.5:1)', + '960 x 512 (1.875:1)', + '1024 x 512 (2:1)', + '1024 x 576 (1.778:1)', + '1536 x 640 (2.4:1)', + '1344 x 768 (1.75:1)', + '1216 x 832 (1.46:1)', + '1152 x 896 (1.286:1)', + '1024 x 1024 (1:1)', + ], + { + "default": '512 x 512 (1:1)' + }), + + "invert": ("BOOLEAN", {"default": False}), + "batch_size": ("INT", { + "default": 1, + "min": 1, + "max": 4096 + }), + }, + } + + RETURN_TYPES = ("LATENT", "INT", "INT") + RETURN_NAMES = ("Latent", "Width", "Height") + FUNCTION = "generate" + CATEGORY = "KJNodes/latents" + + def generate(self, dimensions, invert, batch_size): + from nodes import EmptyLatentImage + result = [x.strip() for x in dimensions.split('x')] + + # Remove the aspect ratio part + result[0] = result[0].split('(')[0].strip() + result[1] = result[1].split('(')[0].strip() + + if invert: + width = int(result[1].split(' ')[0]) + height = int(result[0]) + else: + width = int(result[0]) + height = int(result[1].split(' ')[0]) + latent = EmptyLatentImage().generate(width, height, batch_size)[0] + + return (latent, int(width), int(height),) + +class EmptyLatentImageCustomPresets: + @classmethod + def INPUT_TYPES(cls): + try: + with open(os.path.join(script_directory, 'custom_dimensions.json')) as f: + dimensions_dict = json.load(f) + except FileNotFoundError: + dimensions_dict = [] + return { + "required": { + "dimensions": ( + [f"{d['label']} - {d['value']}" for d in dimensions_dict], + ), + + "invert": ("BOOLEAN", {"default": False}), + "batch_size": ("INT", { + "default": 1, + "min": 1, + "max": 4096 + }), + }, + } + + RETURN_TYPES = ("LATENT", "INT", "INT") + RETURN_NAMES = ("Latent", "Width", "Height") + FUNCTION = "generate" + CATEGORY = "KJNodes/latents" + DESCRIPTION = """ +Generates an empty latent image with the specified dimensions. +The choices are loaded from 'custom_dimensions.json' in the nodes folder. +""" + + def generate(self, dimensions, invert, batch_size): + from nodes import EmptyLatentImage + # Split the string into label and value + label, value = dimensions.split(' - ') + # Split the value into width and height + width, height = [x.strip() for x in value.split('x')] + + if invert: + width, height = height, width + + latent = EmptyLatentImage().generate(int(width), int(height), batch_size)[0] + + return (latent, int(width), int(height),) + +class WidgetToString: + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "id": ("INT", {"default": 0}), + "widget_name": ("STRING", {"multiline": False}), + "return_all": ("BOOLEAN", {"default": False}), + }, + "optional": { + "any_input": (any, {}), + "node_title": ("STRING", {"multiline": False}), + "allowed_float_decimals": ("INT", {"default": 2, "min": 0, "max": 10, "tooltip": "Number of decimal places to display for float values"}), + + }, + "hidden": {"extra_pnginfo": "EXTRA_PNGINFO", + "prompt": "PROMPT", + "unique_id": "UNIQUE_ID",}, + } + + RETURN_TYPES = ("STRING", ) + FUNCTION = "get_widget_value" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Selects a node and it's specified widget and outputs the value as a string. +If no node id or title is provided it will use the 'any_input' link and use that node. 
+To see node id's, enable node id display from Manager badge menu. +Alternatively you can search with the node title. Node titles ONLY exist if they +are manually edited! +The 'any_input' is required for making sure the node you want the value from exists in the workflow. +""" + + def get_widget_value(self, id, widget_name, extra_pnginfo, prompt, unique_id, return_all=False, any_input=None, node_title="", allowed_float_decimals=2): + workflow = extra_pnginfo["workflow"] + #print(json.dumps(workflow, indent=4)) + results = [] + node_id = None # Initialize node_id to handle cases where no match is found + link_id = None + link_to_node_map = {} + + for node in workflow["nodes"]: + if node_title: + if "title" in node: + if node["title"] == node_title: + node_id = node["id"] + break + else: + print("Node title not found.") + elif id != 0: + if node["id"] == id: + node_id = id + break + elif any_input is not None: + if node["type"] == "WidgetToString" and node["id"] == int(unique_id) and not link_id: + for node_input in node["inputs"]: + if node_input["name"] == "any_input": + link_id = node_input["link"] + + # Construct a map of links to node IDs for future reference + node_outputs = node.get("outputs", None) + if not node_outputs: + continue + for output in node_outputs: + node_links = output.get("links", None) + if not node_links: + continue + for link in node_links: + link_to_node_map[link] = node["id"] + if link_id and link == link_id: + break + + if link_id: + node_id = link_to_node_map.get(link_id, None) + + if node_id is None: + raise ValueError("No matching node found for the given title or id") + + values = prompt[str(node_id)] + if "inputs" in values: + if return_all: + # Format items based on type + formatted_items = [] + for k, v in values["inputs"].items(): + if isinstance(v, float): + item = f"{k}: {v:.{allowed_float_decimals}f}" + else: + item = f"{k}: {str(v)}" + formatted_items.append(item) + results.append(', '.join(formatted_items)) + elif widget_name in values["inputs"]: + v = values["inputs"][widget_name] + if isinstance(v, float): + v = f"{v:.{allowed_float_decimals}f}" + else: + v = str(v) + return (v, ) + else: + raise NameError(f"Widget not found: {node_id}.{widget_name}") + return (', '.join(results).strip(', '), ) + +class DummyOut: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "any_input": (any, {}), + } + } + + RETURN_TYPES = (any,) + FUNCTION = "dummy" + CATEGORY = "KJNodes/misc" + OUTPUT_NODE = True + DESCRIPTION = """ +Does nothing, used to trigger generic workflow output. +A way to get previews in the UI without saving anything to disk. 
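To make the lookup in get_widget_value above more concrete, here is a stripped-down sketch with made-up prompt data (node id 12 and its widgets are hypothetical, not taken from a real workflow):

prompt = {"12": {"inputs": {"cfg": 7.5, "steps": 25, "sampler_name": "euler"}}}

def widget_to_string(prompt, node_id, widget_name, decimals=2):
    # Look the widget up by node id and name, formatting floats to a fixed precision.
    value = prompt[str(node_id)]["inputs"][widget_name]
    return f"{value:.{decimals}f}" if isinstance(value, float) else str(value)

print(widget_to_string(prompt, 12, "cfg"))  # "7.50"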
+""" + + def dummy(self, any_input): + return (any_input,) + +class FlipSigmasAdjusted: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"sigmas": ("SIGMAS", ), + "divide_by_last_sigma": ("BOOLEAN", {"default": False}), + "divide_by": ("FLOAT", {"default": 1,"min": 1, "max": 255, "step": 0.01}), + "offset_by": ("INT", {"default": 1,"min": -100, "max": 100, "step": 1}), + } + } + RETURN_TYPES = ("SIGMAS", "STRING",) + RETURN_NAMES = ("SIGMAS", "sigmas_string",) + CATEGORY = "KJNodes/noise" + FUNCTION = "get_sigmas_adjusted" + + def get_sigmas_adjusted(self, sigmas, divide_by_last_sigma, divide_by, offset_by): + + sigmas = sigmas.flip(0) + if sigmas[0] == 0: + sigmas[0] = 0.0001 + adjusted_sigmas = sigmas.clone() + #offset sigma + for i in range(1, len(sigmas)): + offset_index = i - offset_by + if 0 <= offset_index < len(sigmas): + adjusted_sigmas[i] = sigmas[offset_index] + else: + adjusted_sigmas[i] = 0.0001 + if adjusted_sigmas[0] == 0: + adjusted_sigmas[0] = 0.0001 + if divide_by_last_sigma: + adjusted_sigmas = adjusted_sigmas / adjusted_sigmas[-1] + + sigma_np_array = adjusted_sigmas.numpy() + array_string = np.array2string(sigma_np_array, precision=2, separator=', ', threshold=np.inf) + adjusted_sigmas = adjusted_sigmas / divide_by + return (adjusted_sigmas, array_string,) + +class CustomSigmas: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "sigmas_string" :("STRING", {"default": "14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029","multiline": True}), + "interpolate_to_steps": ("INT", {"default": 10,"min": 0, "max": 255, "step": 1}), + } + } + RETURN_TYPES = ("SIGMAS",) + RETURN_NAMES = ("SIGMAS",) + CATEGORY = "KJNodes/noise" + FUNCTION = "customsigmas" + DESCRIPTION = """ +Creates a sigmas tensor from a string of comma separated values. +Examples: + +Nvidia's optimized AYS 10 step schedule for SD 1.5: +14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029 +SDXL: +14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029 +SVD: +700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002 +""" + def customsigmas(self, sigmas_string, interpolate_to_steps): + sigmas_list = sigmas_string.split(', ') + sigmas_float_list = [float(sigma) for sigma in sigmas_list] + sigmas_tensor = torch.FloatTensor(sigmas_float_list) + if len(sigmas_tensor) != interpolate_to_steps + 1: + sigmas_tensor = self.loglinear_interp(sigmas_tensor, interpolate_to_steps + 1) + sigmas_tensor[-1] = 0 + return (sigmas_tensor.float(),) + + def loglinear_interp(self, t_steps, num_steps): + """ + Performs log-linear interpolation of a given array of decreasing numbers. 
+ """ + t_steps_np = t_steps.numpy() + + xs = np.linspace(0, 1, len(t_steps_np)) + ys = np.log(t_steps_np[::-1]) + + new_xs = np.linspace(0, 1, num_steps) + new_ys = np.interp(new_xs, xs, ys) + + interped_ys = np.exp(new_ys)[::-1].copy() + interped_ys_tensor = torch.tensor(interped_ys) + return interped_ys_tensor + +class StringToFloatList: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "string" :("STRING", {"default": "1, 2, 3", "multiline": True}), + } + } + RETURN_TYPES = ("FLOAT",) + RETURN_NAMES = ("FLOAT",) + CATEGORY = "KJNodes/misc" + FUNCTION = "createlist" + + def createlist(self, string): + float_list = [float(x.strip()) for x in string.split(',')] + return (float_list,) + + +class InjectNoiseToLatent: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "latents":("LATENT",), + "strength": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 200.0, "step": 0.0001}), + "noise": ("LATENT",), + "normalize": ("BOOLEAN", {"default": False}), + "average": ("BOOLEAN", {"default": False}), + }, + "optional":{ + "mask": ("MASK", ), + "mix_randn_amount": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.001}), + "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "injectnoise" + CATEGORY = "KJNodes/noise" + + def injectnoise(self, latents, strength, noise, normalize, average, mix_randn_amount=0, seed=None, mask=None): + samples = latents["samples"].clone().cpu() + noise = noise["samples"].clone().cpu() + if samples.shape != samples.shape: + raise ValueError("InjectNoiseToLatent: Latent and noise must have the same shape") + if average: + noised = (samples + noise) / 2 + else: + noised = samples + noise * strength + if normalize: + noised = noised / noised.std() + if mask is not None: + mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(noised.shape[2], noised.shape[3]), mode="bilinear") + mask = mask.expand((-1,noised.shape[1],-1,-1)) + if mask.shape[0] < noised.shape[0]: + mask = mask.repeat((noised.shape[0] -1) // mask.shape[0] + 1, 1, 1, 1)[:noised.shape[0]] + noised = mask * noised + (1-mask) * samples + if mix_randn_amount > 0: + if seed is not None: + generator = torch.manual_seed(seed) + rand_noise = torch.randn(noised.size(), dtype=noised.dtype, layout=noised.layout, generator=generator, device="cpu") + noised = noised + (mix_randn_amount * rand_noise) + + return ({"samples":noised},) + +class SoundReactive: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "sound_level": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 99999, "step": 0.01}), + "start_range_hz": ("INT", {"default": 150, "min": 0, "max": 9999, "step": 1}), + "end_range_hz": ("INT", {"default": 2000, "min": 0, "max": 9999, "step": 1}), + "multiplier": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 99999, "step": 0.01}), + "smoothing_factor": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "normalize": ("BOOLEAN", {"default": False}), + }, + } + + RETURN_TYPES = ("FLOAT","INT",) + RETURN_NAMES =("sound_level", "sound_level_int",) + FUNCTION = "react" + CATEGORY = "KJNodes/audio" + DESCRIPTION = """ +Reacts to the sound level of the input. +Uses your browsers sound input options and requires. +Meant to be used with realtime diffusion with autoqueue. 
+""" + + def react(self, sound_level, start_range_hz, end_range_hz, smoothing_factor, multiplier, normalize): + + sound_level *= multiplier + + if normalize: + sound_level /= 255 + + sound_level_int = int(sound_level) + return (sound_level, sound_level_int, ) + +class GenerateNoise: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), + "multiplier": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 4096, "step": 0.01}), + "constant_batch_noise": ("BOOLEAN", {"default": False}), + "normalize": ("BOOLEAN", {"default": False}), + }, + "optional": { + "model": ("MODEL", ), + "sigmas": ("SIGMAS", ), + "latent_channels": (['4', '16', ],), + "shape": (["BCHW", "BCTHW","BTCHW",],), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "generatenoise" + CATEGORY = "KJNodes/noise" + DESCRIPTION = """ +Generates noise for injection or to be used as empty latents on samplers with add_noise off. +""" + + def generatenoise(self, batch_size, width, height, seed, multiplier, constant_batch_noise, normalize, sigmas=None, model=None, latent_channels=4, shape="BCHW"): + + generator = torch.manual_seed(seed) + if shape == "BCHW": + noise = torch.randn([batch_size, int(latent_channels), height // 8, width // 8], dtype=torch.float32, layout=torch.strided, generator=generator, device="cpu") + elif shape == "BCTHW": + noise = torch.randn([1, int(latent_channels), batch_size,height // 8, width // 8], dtype=torch.float32, layout=torch.strided, generator=generator, device="cpu") + elif shape == "BTCHW": + noise = torch.randn([1, batch_size, int(latent_channels), height // 8, width // 8], dtype=torch.float32, layout=torch.strided, generator=generator, device="cpu") + if sigmas is not None: + sigma = sigmas[0] - sigmas[-1] + sigma /= model.model.latent_format.scale_factor + noise *= sigma + + noise *=multiplier + + if normalize: + noise = noise / noise.std() + if constant_batch_noise: + noise = noise[0].repeat(batch_size, 1, 1, 1) + + + return ({"samples":noise}, ) + +def camera_embeddings(elevation, azimuth): + elevation = torch.as_tensor([elevation]) + azimuth = torch.as_tensor([azimuth]) + embeddings = torch.stack( + [ + torch.deg2rad( + (90 - elevation) - (90) + ), # Zero123 polar is 90-elevation + torch.sin(torch.deg2rad(azimuth)), + torch.cos(torch.deg2rad(azimuth)), + torch.deg2rad( + 90 - torch.full_like(elevation, 0) + ), + ], dim=-1).unsqueeze(1) + + return embeddings + +def interpolate_angle(start, end, fraction): + # Calculate the difference in angles and adjust for wraparound if necessary + diff = (end - start + 540) % 360 - 180 + # Apply fraction to the difference + interpolated = start + fraction * diff + # Normalize the result to be within the range of -180 to 180 + return (interpolated + 180) % 360 - 180 + + +class StableZero123_BatchSchedule: + @classmethod + def INPUT_TYPES(s): + return {"required": { "clip_vision": ("CLIP_VISION",), + "init_image": ("IMAGE",), + "vae": ("VAE",), + "width": ("INT", {"default": 256, "min": 16, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 256, "min": 16, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), + "azimuth_points_string": 
("STRING", {"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n", "multiline": True}), + "elevation_points_string": ("STRING", {"default": "0:(0.0),\n7:(0.0),\n15:(0.0)\n", "multiline": True}), + }} + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + FUNCTION = "encode" + CATEGORY = "KJNodes/experimental" + + def encode(self, clip_vision, init_image, vae, width, height, batch_size, azimuth_points_string, elevation_points_string, interpolation): + output = clip_vision.encode_image(init_image) + pooled = output.image_embeds.unsqueeze(0) + pixels = common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) + encode_pixels = pixels[:,:,:,:3] + t = vae.encode(encode_pixels) + + def ease_in(t): + return t * t + def ease_out(t): + return 1 - (1 - t) * (1 - t) + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + + # Parse the azimuth input string into a list of tuples + azimuth_points = [] + azimuth_points_string = azimuth_points_string.rstrip(',\n') + for point_str in azimuth_points_string.split(','): + frame_str, azimuth_str = point_str.split(':') + frame = int(frame_str.strip()) + azimuth = float(azimuth_str.strip()[1:-1]) + azimuth_points.append((frame, azimuth)) + # Sort the points by frame number + azimuth_points.sort(key=lambda x: x[0]) + + # Parse the elevation input string into a list of tuples + elevation_points = [] + elevation_points_string = elevation_points_string.rstrip(',\n') + for point_str in elevation_points_string.split(','): + frame_str, elevation_str = point_str.split(':') + frame = int(frame_str.strip()) + elevation_val = float(elevation_str.strip()[1:-1]) + elevation_points.append((frame, elevation_val)) + # Sort the points by frame number + elevation_points.sort(key=lambda x: x[0]) + + # Index of the next point to interpolate towards + next_point = 1 + next_elevation_point = 1 + + positive_cond_out = [] + positive_pooled_out = [] + negative_cond_out = [] + negative_pooled_out = [] + + #azimuth interpolation + for i in range(batch_size): + # Find the interpolated azimuth for the current frame + while next_point < len(azimuth_points) and i >= azimuth_points[next_point][0]: + next_point += 1 + # If next_point is equal to the length of points, we've gone past the last point + if next_point == len(azimuth_points): + next_point -= 1 # Set next_point to the last index of points + prev_point = max(next_point - 1, 0) # Ensure prev_point is not less than 0 + + # Calculate fraction + if azimuth_points[next_point][0] != azimuth_points[prev_point][0]: # Prevent division by zero + fraction = (i - azimuth_points[prev_point][0]) / (azimuth_points[next_point][0] - azimuth_points[prev_point][0]) + if interpolation == "ease_in": + fraction = ease_in(fraction) + elif interpolation == "ease_out": + fraction = ease_out(fraction) + elif interpolation == "ease_in_out": + fraction = ease_in_out(fraction) + + # Use the new interpolate_angle function + interpolated_azimuth = interpolate_angle(azimuth_points[prev_point][1], azimuth_points[next_point][1], fraction) + else: + interpolated_azimuth = azimuth_points[prev_point][1] + # Interpolate the elevation + next_elevation_point = 1 + while next_elevation_point < len(elevation_points) and i >= elevation_points[next_elevation_point][0]: + next_elevation_point += 1 + if next_elevation_point == len(elevation_points): + next_elevation_point -= 1 + prev_elevation_point = max(next_elevation_point - 1, 0) + + if elevation_points[next_elevation_point][0] != 
elevation_points[prev_elevation_point][0]: + fraction = (i - elevation_points[prev_elevation_point][0]) / (elevation_points[next_elevation_point][0] - elevation_points[prev_elevation_point][0]) + if interpolation == "ease_in": + fraction = ease_in(fraction) + elif interpolation == "ease_out": + fraction = ease_out(fraction) + elif interpolation == "ease_in_out": + fraction = ease_in_out(fraction) + + interpolated_elevation = interpolate_angle(elevation_points[prev_elevation_point][1], elevation_points[next_elevation_point][1], fraction) + else: + interpolated_elevation = elevation_points[prev_elevation_point][1] + + cam_embeds = camera_embeddings(interpolated_elevation, interpolated_azimuth) + cond = torch.cat([pooled, cam_embeds.repeat((pooled.shape[0], 1, 1))], dim=-1) + + positive_pooled_out.append(t) + positive_cond_out.append(cond) + negative_pooled_out.append(torch.zeros_like(t)) + negative_cond_out.append(torch.zeros_like(pooled)) + + # Concatenate the conditions and pooled outputs + final_positive_cond = torch.cat(positive_cond_out, dim=0) + final_positive_pooled = torch.cat(positive_pooled_out, dim=0) + final_negative_cond = torch.cat(negative_cond_out, dim=0) + final_negative_pooled = torch.cat(negative_pooled_out, dim=0) + + # Structure the final output + final_positive = [[final_positive_cond, {"concat_latent_image": final_positive_pooled}]] + final_negative = [[final_negative_cond, {"concat_latent_image": final_negative_pooled}]] + + latent = torch.zeros([batch_size, 4, height // 8, width // 8]) + return (final_positive, final_negative, {"samples": latent}) + +def linear_interpolate(start, end, fraction): + return start + (end - start) * fraction + +class SV3D_BatchSchedule: + @classmethod + def INPUT_TYPES(s): + return {"required": { "clip_vision": ("CLIP_VISION",), + "init_image": ("IMAGE",), + "vae": ("VAE",), + "width": ("INT", {"default": 576, "min": 16, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 576, "min": 16, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 21, "min": 1, "max": 4096}), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), + "azimuth_points_string": ("STRING", {"default": "0:(0.0),\n9:(180.0),\n20:(360.0)\n", "multiline": True}), + "elevation_points_string": ("STRING", {"default": "0:(0.0),\n9:(0.0),\n20:(0.0)\n", "multiline": True}), + }} + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + FUNCTION = "encode" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +Allow scheduling of the azimuth and elevation conditions for SV3D. 
+Note that SV3D is still a video model and the schedule needs to always go forward +https://huggingface.co/stabilityai/sv3d +""" + + def encode(self, clip_vision, init_image, vae, width, height, batch_size, azimuth_points_string, elevation_points_string, interpolation): + output = clip_vision.encode_image(init_image) + pooled = output.image_embeds.unsqueeze(0) + pixels = common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) + encode_pixels = pixels[:,:,:,:3] + t = vae.encode(encode_pixels) + + def ease_in(t): + return t * t + def ease_out(t): + return 1 - (1 - t) * (1 - t) + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + + # Parse the azimuth input string into a list of tuples + azimuth_points = [] + azimuth_points_string = azimuth_points_string.rstrip(',\n') + for point_str in azimuth_points_string.split(','): + frame_str, azimuth_str = point_str.split(':') + frame = int(frame_str.strip()) + azimuth = float(azimuth_str.strip()[1:-1]) + azimuth_points.append((frame, azimuth)) + # Sort the points by frame number + azimuth_points.sort(key=lambda x: x[0]) + + # Parse the elevation input string into a list of tuples + elevation_points = [] + elevation_points_string = elevation_points_string.rstrip(',\n') + for point_str in elevation_points_string.split(','): + frame_str, elevation_str = point_str.split(':') + frame = int(frame_str.strip()) + elevation_val = float(elevation_str.strip()[1:-1]) + elevation_points.append((frame, elevation_val)) + # Sort the points by frame number + elevation_points.sort(key=lambda x: x[0]) + + # Index of the next point to interpolate towards + next_point = 1 + next_elevation_point = 1 + elevations = [] + azimuths = [] + # For azimuth interpolation + for i in range(batch_size): + # Find the interpolated azimuth for the current frame + while next_point < len(azimuth_points) and i >= azimuth_points[next_point][0]: + next_point += 1 + if next_point == len(azimuth_points): + next_point -= 1 + prev_point = max(next_point - 1, 0) + + if azimuth_points[next_point][0] != azimuth_points[prev_point][0]: + fraction = (i - azimuth_points[prev_point][0]) / (azimuth_points[next_point][0] - azimuth_points[prev_point][0]) + # Apply the ease function to the fraction + if interpolation == "ease_in": + fraction = ease_in(fraction) + elif interpolation == "ease_out": + fraction = ease_out(fraction) + elif interpolation == "ease_in_out": + fraction = ease_in_out(fraction) + + interpolated_azimuth = linear_interpolate(azimuth_points[prev_point][1], azimuth_points[next_point][1], fraction) + else: + interpolated_azimuth = azimuth_points[prev_point][1] + + # Interpolate the elevation + next_elevation_point = 1 + while next_elevation_point < len(elevation_points) and i >= elevation_points[next_elevation_point][0]: + next_elevation_point += 1 + if next_elevation_point == len(elevation_points): + next_elevation_point -= 1 + prev_elevation_point = max(next_elevation_point - 1, 0) + + if elevation_points[next_elevation_point][0] != elevation_points[prev_elevation_point][0]: + fraction = (i - elevation_points[prev_elevation_point][0]) / (elevation_points[next_elevation_point][0] - elevation_points[prev_elevation_point][0]) + # Apply the ease function to the fraction + if interpolation == "ease_in": + fraction = ease_in(fraction) + elif interpolation == "ease_out": + fraction = ease_out(fraction) + elif interpolation == "ease_in_out": + fraction = ease_in_out(fraction) + + interpolated_elevation = 
linear_interpolate(elevation_points[prev_elevation_point][1], elevation_points[next_elevation_point][1], fraction) + else: + interpolated_elevation = elevation_points[prev_elevation_point][1] + + azimuths.append(interpolated_azimuth) + elevations.append(interpolated_elevation) + + #print("azimuths", azimuths) + #print("elevations", elevations) + + # Structure the final output + final_positive = [[pooled, {"concat_latent_image": t, "elevation": elevations, "azimuth": azimuths}]] + final_negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t),"elevation": elevations, "azimuth": azimuths}]] + + latent = torch.zeros([batch_size, 4, height // 8, width // 8]) + return (final_positive, final_negative, {"samples": latent}) + +class LoadResAdapterNormalization: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "resadapter_path": (folder_paths.get_filename_list("checkpoints"), ) + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "load_res_adapter" + CATEGORY = "KJNodes/experimental" + + def load_res_adapter(self, model, resadapter_path): + print("ResAdapter: Checking ResAdapter path") + resadapter_full_path = folder_paths.get_full_path("checkpoints", resadapter_path) + if not os.path.exists(resadapter_full_path): + raise Exception("Invalid model path") + else: + print("ResAdapter: Loading ResAdapter normalization weights") + from comfy.utils import load_torch_file + prefix_to_remove = 'diffusion_model.' + model_clone = model.clone() + norm_state_dict = load_torch_file(resadapter_full_path) + new_values = {key[len(prefix_to_remove):]: value for key, value in norm_state_dict.items() if key.startswith(prefix_to_remove)} + print("ResAdapter: Attempting to add patches with ResAdapter weights") + try: + for key in model.model.diffusion_model.state_dict().keys(): + if key in new_values: + original_tensor = model.model.diffusion_model.state_dict()[key] + new_tensor = new_values[key].to(model.model.diffusion_model.dtype) + if original_tensor.shape == new_tensor.shape: + model_clone.add_object_patch(f"diffusion_model.{key}.data", new_tensor) + else: + print("ResAdapter: No match for key: ",key) + except: + raise Exception("Could not patch model, this way of patching was added to ComfyUI on March 3rd 2024, is your ComfyUI up to date?") + print("ResAdapter: Added resnet normalization patches") + return (model_clone, ) + +class Superprompt: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "instruction_prompt": ("STRING", {"default": 'Expand the following prompt to add more detail', "multiline": True}), + "prompt": ("STRING", {"default": '', "multiline": True, "forceInput": True}), + "max_new_tokens": ("INT", {"default": 128, "min": 1, "max": 4096, "step": 1}), + } + } + + RETURN_TYPES = ("STRING",) + FUNCTION = "process" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +# SuperPrompt +A T5 model fine-tuned on the SuperPrompt dataset for +upsampling text prompts to more detailed descriptions. +Meant to be used as a pre-generation step for text-to-image +models that benefit from more detailed prompts. 
+https://huggingface.co/roborovski/superprompt-v1 +""" + + def process(self, instruction_prompt, prompt, max_new_tokens): + device = model_management.get_torch_device() + from transformers import T5Tokenizer, T5ForConditionalGeneration + + checkpoint_path = os.path.join(script_directory, "models","superprompt-v1") + if not os.path.exists(checkpoint_path): + print(f"Downloading model to: {checkpoint_path}") + from huggingface_hub import snapshot_download + snapshot_download(repo_id="roborovski/superprompt-v1", + local_dir=checkpoint_path, + local_dir_use_symlinks=False) + tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small", legacy=False) + + model = T5ForConditionalGeneration.from_pretrained(checkpoint_path, device_map=device) + model.to(device) + input_text = instruction_prompt + ": " + prompt + + input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device) + outputs = model.generate(input_ids, max_new_tokens=max_new_tokens) + out = (tokenizer.decode(outputs[0])) + out = out.replace('<pad>', '') + out = out.replace('</s>', '') + + return (out, ) + + +class CameraPoseVisualizer: + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "pose_file_path": ("STRING", {"default": '', "multiline": False}), + "base_xval": ("FLOAT", {"default": 0.2,"min": 0, "max": 100, "step": 0.01}), + "zval": ("FLOAT", {"default": 0.3,"min": 0, "max": 100, "step": 0.01}), + "scale": ("FLOAT", {"default": 1.0,"min": 0.01, "max": 10.0, "step": 0.01}), + "use_exact_fx": ("BOOLEAN", {"default": False}), + "relative_c2w": ("BOOLEAN", {"default": True}), + "use_viewer": ("BOOLEAN", {"default": False}), + }, + "optional": { + "cameractrl_poses": ("CAMERACTRL_POSES", {"default": None}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "plot" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ +Visualizes the camera poses, from Animatediff-Evolved CameraCtrl Pose +or a .txt file with RealEstate camera intrinsics and coordinates, in a 3D plot.
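For reference, a standalone sketch of how one RealEstate-style pose line is taken apart in the plot method below (the numbers here are made up; the node additionally skips the first header line of the file):

import numpy as np

line = "0 0.8 0.8 0.5 0.5 0.0 0.0 " + " ".join("%.1f" % v for v in np.eye(3, 4).flatten())
values = [float(v) for v in line.strip().split(' ')]
fx = values[1]                              # focal length column
w2c = np.asarray(values[7:]).reshape(3, 4)  # flattened world-to-camera matrix
print(fx, w2c.shape)  # 0.8 (3, 4)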
+""" + + def plot(self, pose_file_path, scale, base_xval, zval, use_exact_fx, relative_c2w, use_viewer, cameractrl_poses=None): + import matplotlib as mpl + import matplotlib.pyplot as plt + from torchvision.transforms import ToTensor + + x_min = -2.0 * scale + x_max = 2.0 * scale + y_min = -2.0 * scale + y_max = 2.0 * scale + z_min = -2.0 * scale + z_max = 2.0 * scale + plt.rcParams['text.color'] = '#999999' + self.fig = plt.figure(figsize=(18, 7)) + self.fig.patch.set_facecolor('#353535') + self.ax = self.fig.add_subplot(projection='3d') + self.ax.set_facecolor('#353535') # Set the background color here + self.ax.grid(color='#999999', linestyle='-', linewidth=0.5) + self.plotly_data = None # plotly data traces + self.ax.set_aspect("auto") + self.ax.set_xlim(x_min, x_max) + self.ax.set_ylim(y_min, y_max) + self.ax.set_zlim(z_min, z_max) + self.ax.set_xlabel('x', color='#999999') + self.ax.set_ylabel('y', color='#999999') + self.ax.set_zlabel('z', color='#999999') + for text in self.ax.get_xticklabels() + self.ax.get_yticklabels() + self.ax.get_zticklabels(): + text.set_color('#999999') + print('initialize camera pose visualizer') + + if pose_file_path != "": + with open(pose_file_path, 'r') as f: + poses = f.readlines() + w2cs = [np.asarray([float(p) for p in pose.strip().split(' ')[7:]]).reshape(3, 4) for pose in poses[1:]] + fxs = [float(pose.strip().split(' ')[1]) for pose in poses[1:]] + #print(poses) + elif cameractrl_poses is not None: + poses = cameractrl_poses + w2cs = [np.array(pose[7:]).reshape(3, 4) for pose in cameractrl_poses] + fxs = [pose[1] for pose in cameractrl_poses] + else: + raise ValueError("Please provide either pose_file_path or cameractrl_poses") + + total_frames = len(w2cs) + transform_matrix = np.asarray([[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]]).reshape(4, 4) + last_row = np.zeros((1, 4)) + last_row[0, -1] = 1.0 + + w2cs = [np.concatenate((w2c, last_row), axis=0) for w2c in w2cs] + c2ws = self.get_c2w(w2cs, transform_matrix, relative_c2w) + + for frame_idx, c2w in enumerate(c2ws): + self.extrinsic2pyramid(c2w, frame_idx / total_frames, hw_ratio=1/1, base_xval=base_xval, + zval=(fxs[frame_idx] if use_exact_fx else zval)) + + # Create the colorbar + cmap = mpl.cm.rainbow + norm = mpl.colors.Normalize(vmin=0, vmax=total_frames) + colorbar = self.fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), ax=self.ax, orientation='vertical') + + # Change the colorbar label + colorbar.set_label('Frame', color='#999999') # Change the label and its color + + # Change the tick colors + colorbar.ax.yaxis.set_tick_params(colors='#999999') # Change the tick color + + # Change the tick frequency + # Assuming you want to set the ticks at every 10th frame + ticks = np.arange(0, total_frames, 10) + colorbar.ax.yaxis.set_ticks(ticks) + + plt.title('') + plt.draw() + buf = io.BytesIO() + plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0) + buf.seek(0) + img = Image.open(buf) + tensor_img = ToTensor()(img) + buf.close() + tensor_img = tensor_img.permute(1, 2, 0).unsqueeze(0) + if use_viewer: + time.sleep(1) + plt.show() + return (tensor_img,) + + def extrinsic2pyramid(self, extrinsic, color_map='red', hw_ratio=1/1, base_xval=1, zval=3): + from mpl_toolkits.mplot3d.art3d import Poly3DCollection + vertex_std = np.array([[0, 0, 0, 1], + [base_xval, -base_xval * hw_ratio, zval, 1], + [base_xval, base_xval * hw_ratio, zval, 1], + [-base_xval, base_xval * hw_ratio, zval, 1], + [-base_xval, -base_xval * hw_ratio, zval, 1]]) + vertex_transformed = vertex_std 
@ extrinsic.T + meshes = [[vertex_transformed[0, :-1], vertex_transformed[1][:-1], vertex_transformed[2, :-1]], + [vertex_transformed[0, :-1], vertex_transformed[2, :-1], vertex_transformed[3, :-1]], + [vertex_transformed[0, :-1], vertex_transformed[3, :-1], vertex_transformed[4, :-1]], + [vertex_transformed[0, :-1], vertex_transformed[4, :-1], vertex_transformed[1, :-1]], + [vertex_transformed[1, :-1], vertex_transformed[2, :-1], vertex_transformed[3, :-1], vertex_transformed[4, :-1]]] + + color = color_map if isinstance(color_map, str) else plt.cm.rainbow(color_map) + + self.ax.add_collection3d( + Poly3DCollection(meshes, facecolors=color, linewidths=0.3, edgecolors=color, alpha=0.25)) + + def customize_legend(self, list_label): + from matplotlib.patches import Patch + import matplotlib.pyplot as plt + list_handle = [] + for idx, label in enumerate(list_label): + color = plt.cm.rainbow(idx / len(list_label)) + patch = Patch(color=color, label=label) + list_handle.append(patch) + plt.legend(loc='right', bbox_to_anchor=(1.8, 0.5), handles=list_handle) + + def get_c2w(self, w2cs, transform_matrix, relative_c2w): + if relative_c2w: + target_cam_c2w = np.array([ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1] + ]) + abs2rel = target_cam_c2w @ w2cs[0] + ret_poses = [target_cam_c2w, ] + [abs2rel @ np.linalg.inv(w2c) for w2c in w2cs[1:]] + else: + ret_poses = [np.linalg.inv(w2c) for w2c in w2cs] + ret_poses = [transform_matrix @ x for x in ret_poses] + return np.array(ret_poses, dtype=np.float32) + + + +class StabilityAPI_SD3: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "prompt": ("STRING", {"multiline": True}), + "n_prompt": ("STRING", {"multiline": True}), + "seed": ("INT", {"default": 123,"min": 0, "max": 4294967294, "step": 1}), + "model": ( + [ + 'sd3', + 'sd3-turbo', + ], + { + "default": 'sd3' + }), + "aspect_ratio": ( + [ + '1:1', + '16:9', + '21:9', + '2:3', + '3:2', + '4:5', + '5:4', + '9:16', + '9:21', + ], + { + "default": '1:1' + }), + "output_format": ( + [ + 'png', + 'jpeg', + ], + { + "default": 'jpeg' + }), + }, + "optional": { + "api_key": ("STRING", {"multiline": True}), + "image": ("IMAGE",), + "img2img_strength": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "disable_metadata": ("BOOLEAN", {"default": True}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "apicall" + + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +## Calls StabilityAI API + +Although you may have multiple keys in your account, +you should use the same key for all requests to this API. + +Get your API key here: https://platform.stability.ai/account/keys +Recommended to set the key in the config.json -file under this +node packs folder. +# WARNING: +Otherwise the API key may get saved in the image metadata even +with "disable_metadata" on if the workflow includes save nodes +separate from this node. 
+ +sd3 requires 6.5 credits per generation +sd3-turbo requires 4 credits per generation + +If no image is provided, mode is set to text-to-image + +""" + + def apicall(self, prompt, n_prompt, model, seed, aspect_ratio, output_format, + img2img_strength=0.5, image=None, disable_metadata=True, api_key=""): + from comfy.cli_args import args + if disable_metadata: + args.disable_metadata = True + else: + args.disable_metadata = False + + import requests + from torchvision import transforms + + data = { + "mode": "text-to-image", + "prompt": prompt, + "model": model, + "seed": seed, + "output_format": output_format + } + + if image is not None: + image = image.permute(0, 3, 1, 2).squeeze(0) + to_pil = transforms.ToPILImage() + pil_image = to_pil(image) + # Save the PIL Image to a BytesIO object + buffer = io.BytesIO() + pil_image.save(buffer, format='PNG') + buffer.seek(0) + files = {"image": ("image.png", buffer, "image/png")} + + data["mode"] = "image-to-image" + data["image"] = pil_image + data["strength"] = img2img_strength + else: + data["aspect_ratio"] = aspect_ratio, + files = {"none": ''} + + if model != "sd3-turbo": + data["negative_prompt"] = n_prompt + + headers={ + "accept": "image/*" + } + + if api_key != "": + headers["authorization"] = api_key + else: + config_file_path = os.path.join(script_directory,"config.json") + with open(config_file_path, 'r') as file: + config = json.load(file) + api_key_from_config = config.get("sai_api_key") + headers["authorization"] = api_key_from_config + + response = requests.post( + f"https://api.stability.ai/v2beta/stable-image/generate/sd3", + headers=headers, + files = files, + data = data, + ) + + if response.status_code == 200: + # Convert the response content to a PIL Image + image = Image.open(io.BytesIO(response.content)) + # Convert the PIL Image to a PyTorch tensor + transform = transforms.ToTensor() + tensor_image = transform(image) + tensor_image = tensor_image.unsqueeze(0) + tensor_image = tensor_image.permute(0, 2, 3, 1).cpu().float() + return (tensor_image,) + else: + try: + # Attempt to parse the response as JSON + error_data = response.json() + raise Exception(f"Server error: {error_data}") + except json.JSONDecodeError: + # If the response is not valid JSON, raise a different exception + raise Exception(f"Server error: {response.text}") + +class CheckpointPerturbWeights: + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "joint_blocks": ("FLOAT", {"default": 0.02, "min": 0.001, "max": 10.0, "step": 0.001}), + "final_layer": ("FLOAT", {"default": 0.02, "min": 0.001, "max": 10.0, "step": 0.001}), + "rest_of_the_blocks": ("FLOAT", {"default": 0.02, "min": 0.001, "max": 10.0, "step": 0.001}), + "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), + } + } + RETURN_TYPES = ("MODEL",) + FUNCTION = "mod" + OUTPUT_NODE = True + + CATEGORY = "KJNodes/experimental" + + def mod(self, seed, model, joint_blocks, final_layer, rest_of_the_blocks): + import copy + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + device = model_management.get_torch_device() + model_copy = copy.deepcopy(model) + model_copy.model.to(device) + keys = model_copy.model.diffusion_model.state_dict().keys() + + dict = {} + for key in keys: + dict[key] = model_copy.model.diffusion_model.state_dict()[key] + + pbar = ProgressBar(len(keys)) + for k in keys: + v = dict[k] + print(f'{k}: {v.std()}') + if k.startswith('joint_blocks'): + multiplier = joint_blocks + elif k.startswith('final_layer'): + 
multiplier = final_layer + else: + multiplier = rest_of_the_blocks + dict[k] += torch.normal(torch.zeros_like(v) * v.mean(), torch.ones_like(v) * v.std() * multiplier).to(device) + pbar.update(1) + model_copy.model.diffusion_model.load_state_dict(dict) + return model_copy, + +class DifferentialDiffusionAdvanced(): + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL", ), + "samples": ("LATENT",), + "mask": ("MASK",), + "multiplier": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.001}), + }} + RETURN_TYPES = ("MODEL", "LATENT") + FUNCTION = "apply" + CATEGORY = "_for_testing" + INIT = False + + def apply(self, model, samples, mask, multiplier): + self.multiplier = multiplier + model = model.clone() + model.set_model_denoise_mask_function(self.forward) + s = samples.copy() + s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])) + return (model, s) + + def forward(self, sigma: torch.Tensor, denoise_mask: torch.Tensor, extra_options: dict): + model = extra_options["model"] + step_sigmas = extra_options["sigmas"] + sigma_to = model.inner_model.model_sampling.sigma_min + if step_sigmas[-1] > sigma_to: + sigma_to = step_sigmas[-1] + sigma_from = step_sigmas[0] + + ts_from = model.inner_model.model_sampling.timestep(sigma_from) + ts_to = model.inner_model.model_sampling.timestep(sigma_to) + current_ts = model.inner_model.model_sampling.timestep(sigma[0]) + + threshold = (current_ts - ts_to) / (ts_from - ts_to) / self.multiplier + + return (denoise_mask >= threshold).to(denoise_mask.dtype) + +class FluxBlockLoraSelect: + def __init__(self): + self.loaded_lora = None + + @classmethod + def INPUT_TYPES(s): + arg_dict = {} + argument = ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}) + + for i in range(19): + arg_dict["double_blocks.{}.".format(i)] = argument + + for i in range(38): + arg_dict["single_blocks.{}.".format(i)] = argument + + return {"required": arg_dict} + + RETURN_TYPES = ("SELECTEDBLOCKS", ) + RETURN_NAMES = ("blocks", ) + OUTPUT_TOOLTIPS = ("The modified diffusion model.",) + FUNCTION = "load_lora" + + CATEGORY = "KJNodes/experimental" + DESCRIPTION = "Select individual block alpha values, value of 0 removes the block altogether" + + def load_lora(self, **kwargs): + return (kwargs,) + +class HunyuanVideoBlockLoraSelect: + def __init__(self): + self.loaded_lora = None + + @classmethod + def INPUT_TYPES(s): + arg_dict = {} + argument = ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.01}) + + for i in range(20): + arg_dict["double_blocks.{}.".format(i)] = argument + + for i in range(40): + arg_dict["single_blocks.{}.".format(i)] = argument + + return {"required": arg_dict} + + RETURN_TYPES = ("SELECTEDBLOCKS", ) + RETURN_NAMES = ("blocks", ) + OUTPUT_TOOLTIPS = ("The modified diffusion model.",) + FUNCTION = "load_lora" + + CATEGORY = "KJNodes/experimental" + DESCRIPTION = "Select individual block alpha values, value of 0 removes the block altogether" + + def load_lora(self, **kwargs): + return (kwargs,) + +class FluxBlockLoraLoader: + def __init__(self): + self.loaded_lora = None + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}), + "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. 
This value can be negative."}), + + }, + "optional": { + "lora_name": (folder_paths.get_filename_list("loras"), {"tooltip": "The name of the LoRA."}), + "opt_lora_path": ("STRING", {"forceInput": True, "tooltip": "Absolute path of the LoRA."}), + "blocks": ("SELECTEDBLOCKS",), + } + } + + RETURN_TYPES = ("MODEL", "STRING", ) + RETURN_NAMES = ("model", "rank", ) + OUTPUT_TOOLTIPS = ("The modified diffusion model.", "possible rank of the LoRA.") + FUNCTION = "load_lora" + CATEGORY = "KJNodes/experimental" + + def load_lora(self, model, strength_model, lora_name=None, opt_lora_path=None, blocks=None): + + import comfy.lora + + if opt_lora_path: + lora_path = opt_lora_path + else: + lora_path = folder_paths.get_full_path("loras", lora_name) + + lora = None + if self.loaded_lora is not None: + if self.loaded_lora[0] == lora_path: + lora = self.loaded_lora[1] + else: + temp = self.loaded_lora + self.loaded_lora = None + del temp + + if lora is None: + lora = load_torch_file(lora_path, safe_load=True) + # Find the first key that ends with "weight" + rank = "unknown" + weight_key = next((key for key in lora.keys() if key.endswith('weight')), None) + # Print the shape of the value corresponding to the key + if weight_key: + print(f"Shape of the first 'weight' key ({weight_key}): {lora[weight_key].shape}") + rank = str(lora[weight_key].shape[0]) + else: + print("No key ending with 'weight' found.") + rank = "Couldn't find rank" + self.loaded_lora = (lora_path, lora) + + key_map = {} + if model is not None: + key_map = comfy.lora.model_lora_keys_unet(model.model, key_map) + + loaded = comfy.lora.load_lora(lora, key_map) + + if blocks is not None: + keys_to_delete = [] + + for block in blocks: + for key in list(loaded.keys()): + match = False + if isinstance(key, str) and block in key: + match = True + elif isinstance(key, tuple): + for k in key: + if block in k: + match = True + break + + if match: + ratio = blocks[block] + if ratio == 0: + keys_to_delete.append(key) + else: + value = loaded[key] + if isinstance(value, tuple) and len(value) > 1 and isinstance(value[1], tuple): + inner_tuple = value[1] + if len(inner_tuple) >= 3: + inner_tuple = (inner_tuple[0], inner_tuple[1], ratio, *inner_tuple[3:]) + loaded[key] = (value[0], inner_tuple) + else: + loaded[key] = (value[0], ratio) + + for key in keys_to_delete: + del loaded[key] + + print("loading lora keys:") + for key, value in loaded.items(): + if isinstance(value, tuple) and len(value) > 1 and isinstance(value[1], tuple): + inner_tuple = value[1] + alpha = inner_tuple[2] if len(inner_tuple) >= 3 else None + else: + alpha = value[1] if len(value) > 1 else None + print(f"Key: {key}, Alpha: {alpha}") + + + if model is not None: + new_modelpatcher = model.clone() + k = new_modelpatcher.add_patches(loaded, strength_model) + + k = set(k) + for x in loaded: + if (x not in k): + print("NOT LOADED {}".format(x)) + + return (new_modelpatcher, rank) + +class CustomControlNetWeightsFluxFromList: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "list_of_floats": ("FLOAT", {"forceInput": True}, ), + }, + "optional": { + "uncond_multiplier": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}, ), + "cn_extras": ("CN_WEIGHTS_EXTRAS",), + "autosize": ("ACNAUTOSIZE", {"padding": 0}), + } + } + + RETURN_TYPES = ("CONTROL_NET_WEIGHTS", "TIMESTEP_KEYFRAME",) + RETURN_NAMES = ("CN_WEIGHTS", "TK_SHORTCUT") + FUNCTION = "load_weights" + DESCRIPTION = "Creates controlnet weights from a list of floats for Advanced-ControlNet" + + CATEGORY = 
"KJNodes/controlnet" + + def load_weights(self, list_of_floats: list[float], + uncond_multiplier: float=1.0, cn_extras: dict[str]={}): + + adv_control = importlib.import_module("ComfyUI-Advanced-ControlNet.adv_control") + ControlWeights = adv_control.utils.ControlWeights + TimestepKeyframeGroup = adv_control.utils.TimestepKeyframeGroup + TimestepKeyframe = adv_control.utils.TimestepKeyframe + + weights = ControlWeights.controlnet(weights_input=list_of_floats, uncond_multiplier=uncond_multiplier, extras=cn_extras) + print(weights.weights_input) + return (weights, TimestepKeyframeGroup.default(TimestepKeyframe(control_weights=weights))) + +SHAKKERLABS_UNION_CONTROLNET_TYPES = { + "canny": 0, + "tile": 1, + "depth": 2, + "blur": 3, + "pose": 4, + "gray": 5, + "low quality": 6, +} + +class SetShakkerLabsUnionControlNetType: + @classmethod + def INPUT_TYPES(s): + return {"required": {"control_net": ("CONTROL_NET", ), + "type": (["auto"] + list(SHAKKERLABS_UNION_CONTROLNET_TYPES.keys()),) + }} + + CATEGORY = "conditioning/controlnet" + RETURN_TYPES = ("CONTROL_NET",) + + FUNCTION = "set_controlnet_type" + + def set_controlnet_type(self, control_net, type): + control_net = control_net.copy() + type_number = SHAKKERLABS_UNION_CONTROLNET_TYPES.get(type, -1) + if type_number >= 0: + control_net.set_extra_arg("control_type", [type_number]) + else: + control_net.set_extra_arg("control_type", []) + + return (control_net,) + +class ModelSaveKJ: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "filename_prefix": ("STRING", {"default": "diffusion_models/ComfyUI"}), + "model_key_prefix": ("STRING", {"default": "model.diffusion_model."}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},} + RETURN_TYPES = () + FUNCTION = "save" + OUTPUT_NODE = True + + CATEGORY = "advanced/model_merging" + + def save(self, model, filename_prefix, model_key_prefix, prompt=None, extra_pnginfo=None): + from comfy.utils import save_torch_file + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) + + output_checkpoint = f"{filename}_{counter:05}_.safetensors" + output_checkpoint = os.path.join(full_output_folder, output_checkpoint) + + load_models = [model] + + model_management.load_models_gpu(load_models, force_patch_weights=True) + default_prefix = "model.diffusion_model." 
+ + sd = model.model.state_dict_for_saving(None, None, None) + + new_sd = {} + for k in sd: + if k.startswith(default_prefix): + new_key = model_key_prefix + k[len(default_prefix):] + else: + new_key = k # In case the key doesn't start with the default prefix, keep it unchanged + t = sd[k] + if not t.is_contiguous(): + t = t.contiguous() + new_sd[new_key] = t + print(full_output_folder) + if not os.path.exists(full_output_folder): + os.makedirs(full_output_folder) + save_torch_file(new_sd, os.path.join(full_output_folder, output_checkpoint)) + return {} + +class StyleModelApplyAdvanced: + @classmethod + def INPUT_TYPES(s): + return {"required": {"conditioning": ("CONDITIONING", ), + "style_model": ("STYLE_MODEL", ), + "clip_vision_output": ("CLIP_VISION_OUTPUT", ), + "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.001}), + }} + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "apply_stylemodel" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = "StyleModelApply but with strength parameter" + + def apply_stylemodel(self, clip_vision_output, style_model, conditioning, strength=1.0): + cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0) + cond = strength * cond + c = [] + for t in conditioning: + n = [torch.cat((t[0], cond), dim=1), t[1].copy()] + c.append(n) + return (c, ) + +class AudioConcatenate: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "audio1": ("AUDIO",), + "audio2": ("AUDIO",), + "direction": ( + [ 'right', + 'left', + ], + { + "default": 'right' + }), + }} + + RETURN_TYPES = ("AUDIO",) + FUNCTION = "concanate" + CATEGORY = "KJNodes/audio" + DESCRIPTION = """ +Concatenates the audio1 to audio2 in the specified direction. +""" + + def concanate(self, audio1, audio2, direction): + sample_rate_1 = audio1["sample_rate"] + sample_rate_2 = audio2["sample_rate"] + if sample_rate_1 != sample_rate_2: + raise Exception("Sample rates of the two audios do not match") + + waveform_1 = audio1["waveform"] + print(waveform_1.shape) + waveform_2 = audio2["waveform"] + + # Concatenate based on the specified direction + if direction == 'right': + concatenated_audio = torch.cat((waveform_1, waveform_2), dim=2) # Concatenate along width + elif direction == 'left': + concatenated_audio= torch.cat((waveform_2, waveform_1), dim=2) # Concatenate along width + return ({"waveform": concatenated_audio, "sample_rate": sample_rate_1},) + +class LeapfusionHunyuanI2V: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "latent": ("LATENT",), + "index": ("INT", {"default": 0, "min": -1, "max": 1000, "step": 1,"tooltip": "The index of the latent to be replaced. 
0 for first frame and -1 for last"}), + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The start percentage of steps to apply"}), + "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The end percentage of steps to apply"}), + "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.001}), + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "KJNodes/experimental" + + def patch(self, model, latent, index, strength, start_percent, end_percent): + + def outer_wrapper(samples, index, start_percent, end_percent): + def unet_wrapper(apply_model, args): + steps = args["c"]["transformer_options"]["sample_sigmas"] + inp, timestep, c = args["input"], args["timestep"], args["c"] + matched_step_index = (steps == timestep).nonzero() + if len(matched_step_index) > 0: + current_step_index = matched_step_index.item() + else: + for i in range(len(steps) - 1): + # walk from beginning of steps until crossing the timestep + if (steps[i] - timestep[0]) * (steps[i + 1] - timestep[0]) <= 0: + current_step_index = i + break + else: + current_step_index = 0 + current_percent = current_step_index / (len(steps) - 1) + if samples is not None: + if start_percent <= current_percent <= end_percent: + inp[:, :, [index], :, :] = samples[:, :, [0], :, :].to(inp) + else: + inp[:, :, [index], :, :] = torch.zeros(1) + return apply_model(inp, timestep, **c) + return unet_wrapper + + samples = latent["samples"] * 0.476986 * strength + m = model.clone() + m.set_model_unet_function_wrapper(outer_wrapper(samples, index, start_percent, end_percent)) + + return (m,) + +class ImageNoiseAugmentation: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "noise_aug_strength": ("FLOAT", {"default": None, "min": 0.0, "max": 100.0, "step": 0.001}), + "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "add_noise" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ + Add noise to an image. 
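+    Gaussian noise scaled by noise_aug_strength is added to the image; pixels whose
+    value is exactly -1 are left untouched.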
+ """ + + def add_noise(self, image, noise_aug_strength, seed): + torch.manual_seed(seed) + sigma = torch.ones((image.shape[0],)).to(image.device, image.dtype) * noise_aug_strength + image_noise = torch.randn_like(image) * sigma[:, None, None, None] + image_noise = torch.where(image==-1, torch.zeros_like(image), image_noise) + image_out = image + image_noise + return image_out, + +class VAELoaderKJ: + @staticmethod + def vae_list(): + vaes = folder_paths.get_filename_list("vae") + approx_vaes = folder_paths.get_filename_list("vae_approx") + sdxl_taesd_enc = False + sdxl_taesd_dec = False + sd1_taesd_enc = False + sd1_taesd_dec = False + sd3_taesd_enc = False + sd3_taesd_dec = False + f1_taesd_enc = False + f1_taesd_dec = False + + for v in approx_vaes: + if v.startswith("taesd_decoder."): + sd1_taesd_dec = True + elif v.startswith("taesd_encoder."): + sd1_taesd_enc = True + elif v.startswith("taesdxl_decoder."): + sdxl_taesd_dec = True + elif v.startswith("taesdxl_encoder."): + sdxl_taesd_enc = True + elif v.startswith("taesd3_decoder."): + sd3_taesd_dec = True + elif v.startswith("taesd3_encoder."): + sd3_taesd_enc = True + elif v.startswith("taef1_encoder."): + f1_taesd_dec = True + elif v.startswith("taef1_decoder."): + f1_taesd_enc = True + if sd1_taesd_dec and sd1_taesd_enc: + vaes.append("taesd") + if sdxl_taesd_dec and sdxl_taesd_enc: + vaes.append("taesdxl") + if sd3_taesd_dec and sd3_taesd_enc: + vaes.append("taesd3") + if f1_taesd_dec and f1_taesd_enc: + vaes.append("taef1") + return vaes + + @staticmethod + def load_taesd(name): + sd = {} + approx_vaes = folder_paths.get_filename_list("vae_approx") + + encoder = next(filter(lambda a: a.startswith("{}_encoder.".format(name)), approx_vaes)) + decoder = next(filter(lambda a: a.startswith("{}_decoder.".format(name)), approx_vaes)) + + enc = load_torch_file(folder_paths.get_full_path_or_raise("vae_approx", encoder)) + for k in enc: + sd["taesd_encoder.{}".format(k)] = enc[k] + + dec = load_torch_file(folder_paths.get_full_path_or_raise("vae_approx", decoder)) + for k in dec: + sd["taesd_decoder.{}".format(k)] = dec[k] + + if name == "taesd": + sd["vae_scale"] = torch.tensor(0.18215) + sd["vae_shift"] = torch.tensor(0.0) + elif name == "taesdxl": + sd["vae_scale"] = torch.tensor(0.13025) + sd["vae_shift"] = torch.tensor(0.0) + elif name == "taesd3": + sd["vae_scale"] = torch.tensor(1.5305) + sd["vae_shift"] = torch.tensor(0.0609) + elif name == "taef1": + sd["vae_scale"] = torch.tensor(0.3611) + sd["vae_shift"] = torch.tensor(0.1159) + return sd + + @classmethod + def INPUT_TYPES(s): + return { + "required": { "vae_name": (s.vae_list(), ), + "device": (["main_device", "cpu"],), + "weight_dtype": (["bf16", "fp16", "fp32" ],), + } + } + + RETURN_TYPES = ("VAE",) + FUNCTION = "load_vae" + CATEGORY = "KJNodes/vae" + + def load_vae(self, vae_name, device, weight_dtype): + from comfy.sd import VAE + dtype = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}[weight_dtype] + if device == "main_device": + device = model_management.get_torch_device() + elif device == "cpu": + device = torch.device("cpu") + if vae_name in ["taesd", "taesdxl", "taesd3", "taef1"]: + sd = self.load_taesd(vae_name) + else: + vae_path = folder_paths.get_full_path_or_raise("vae", vae_name) + sd = load_torch_file(vae_path) + vae = VAE(sd=sd, device=device, dtype=dtype) + return (vae,) + +from comfy.samplers import sampling_function, CFGGuider +class Guider_ScheduledCFG(CFGGuider): + + def set_cfg(self, cfg, start_percent, end_percent): + self.cfg = 
cfg + self.start_percent = start_percent + self.end_percent = end_percent + + def predict_noise(self, x, timestep, model_options={}, seed=None): + steps = model_options["transformer_options"]["sample_sigmas"] + matched_step_index = (steps == timestep).nonzero() + assert not (isinstance(self.cfg, list) and len(self.cfg) != (len(steps) - 1)), "cfg list length must match step count" + if len(matched_step_index) > 0: + current_step_index = matched_step_index.item() + else: + for i in range(len(steps) - 1): + # walk from beginning of steps until crossing the timestep + if (steps[i] - timestep[0]) * (steps[i + 1] - timestep[0]) <= 0: + current_step_index = i + break + else: + current_step_index = 0 + current_percent = current_step_index / (len(steps) - 1) + + if self.start_percent <= current_percent <= self.end_percent: + if isinstance(self.cfg, list): + cfg = self.cfg[current_step_index] + else: + cfg = self.cfg + uncond = self.conds.get("negative", None) + else: + uncond = None + cfg = 1.0 + + return sampling_function(self.inner_model, x, timestep, uncond, self.conds.get("positive", None), cfg, model_options=model_options, seed=seed) + +class ScheduledCFGGuidance: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "cfg": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 100.0, "step": 0.01}), + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step":0.01}), + "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step":0.01}), + }, + } + RETURN_TYPES = ("GUIDER",) + FUNCTION = "get_guider" + CATEGORY = "KJNodes/experimental" + DESCRiPTION = """ +CFG Guider that allows for scheduled CFG changes over steps, the steps outside the range will use CFG 1.0 thus being processed faster. +cfg input can be a list of floats matching step count, or a single float for all steps. 
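+For example, with 20 sampling steps, start_percent 0.25 and end_percent 0.75, roughly steps 5-15 receive the given cfg while the remaining steps run at cfg 1.0.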
+""" + + def get_guider(self, model, cfg, positive, negative, start_percent, end_percent): + guider = Guider_ScheduledCFG(model) + guider.set_conds(positive, negative) + guider.set_cfg(cfg, start_percent, end_percent) + return (guider, ) + + +class ApplyRifleXRoPE_WanVideo: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "latent": ("LATENT", {"tooltip": "Only used to get the latent count"}), + "k": ("INT", {"default": 6, "min": 1, "max": 100, "step": 1, "tooltip": "Index of intrinsic frequency"}), + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + CATEGORY = "KJNodes/experimental" + EXPERIMENTAL = True + DESCRIPTION = "Extends the potential frame count of HunyuanVideo using this method: https://github.com/thu-ml/RIFLEx" + + def patch(self, model, latent, k): + model_class = model.model.diffusion_model + + model_clone = model.clone() + num_frames = latent["samples"].shape[2] + d = model_class.dim // model_class.num_heads + + rope_embedder = EmbedND_RifleX( + d, + 10000.0, + [d - 4 * (d // 6), 2 * (d // 6), 2 * (d // 6)], + num_frames, + k + ) + + model_clone.add_object_patch(f"diffusion_model.rope_embedder", rope_embedder) + + return (model_clone, ) + +class ApplyRifleXRoPE_HunuyanVideo: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "latent": ("LATENT", {"tooltip": "Only used to get the latent count"}), + "k": ("INT", {"default": 4, "min": 1, "max": 100, "step": 1, "tooltip": "Index of intrinsic frequency"}), + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + CATEGORY = "KJNodes/experimental" + EXPERIMENTAL = True + DESCRIPTION = "Extends the potential frame count of HunyuanVideo using this method: https://github.com/thu-ml/RIFLEx" + + def patch(self, model, latent, k): + model_class = model.model.diffusion_model + + model_clone = model.clone() + num_frames = latent["samples"].shape[2] + + pe_embedder = EmbedND_RifleX( + model_class.params.hidden_size // model_class.params.num_heads, + model_class.params.theta, + model_class.params.axes_dim, + num_frames, + k + ) + + model_clone.add_object_patch(f"diffusion_model.pe_embedder", pe_embedder) + + return (model_clone, ) + +def rope_riflex(pos, dim, theta, L_test, k): + from einops import rearrange + assert dim % 2 == 0 + if model_management.is_device_mps(pos.device) or model_management.is_intel_xpu() or model_management.is_directml_enabled(): + device = torch.device("cpu") + else: + device = pos.device + + scale = torch.linspace(0, (dim - 2) / dim, steps=dim//2, dtype=torch.float64, device=device) + omega = 1.0 / (theta**scale) + + # RIFLEX modification - adjust last frequency component if L_test and k are provided + if k and L_test: + omega[k-1] = 0.9 * 2 * torch.pi / L_test + + out = torch.einsum("...n,d->...nd", pos.to(dtype=torch.float32, device=device), omega) + out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1) + out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2) + return out.to(dtype=torch.float32, device=pos.device) + +class EmbedND_RifleX(nn.Module): + def __init__(self, dim, theta, axes_dim, num_frames, k): + super().__init__() + self.dim = dim + self.theta = theta + self.axes_dim = axes_dim + self.num_frames = num_frames + self.k = k + + def forward(self, ids): + n_axes = ids.shape[-1] + emb = torch.cat( + [rope_riflex(ids[..., i], self.axes_dim[i], self.theta, self.num_frames, self.k if i == 0 else 0) for i in range(n_axes)], + dim=-3, + ) + return emb.unsqueeze(1) + + +class 
Timer: + def __init__(self, name): + self.name = name + self.start_time = None + self.elapsed = 0 + +class TimerNodeKJ: + @classmethod + + def INPUT_TYPES(s): + return { + "required": { + "any_input": (any, {}), + "mode": (["start", "stop"],), + "name": ("STRING", {"default": "Timer"}), + }, + "optional": { + "timer": ("TIMER",), + }, + } + + RETURN_TYPES = (any, "TIMER", "INT", ) + RETURN_NAMES = ("any_output", "timer", "time") + FUNCTION = "timer" + CATEGORY = "KJNodes/misc" + + def timer(self, mode, name, any_input=None, timer=None): + if timer is None: + if mode == "start": + timer = Timer(name=name) + timer.start_time = time.time() + return {"ui": { + "text": [f"{timer.start_time}"]}, + "result": (any_input, timer, 0) + } + elif mode == "stop" and timer is not None: + end_time = time.time() + timer.elapsed = int((end_time - timer.start_time) * 1000) + timer.start_time = None + return (any_input, timer, timer.elapsed) + +class HunyuanVideoEncodeKeyframesToCond: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "positive": ("CONDITIONING", ), + "vae": ("VAE", ), + "start_frame": ("IMAGE", ), + "end_frame": ("IMAGE", ), + "num_frames": ("INT", {"default": 33, "min": 2, "max": 4096, "step": 1}), + "tile_size": ("INT", {"default": 512, "min": 64, "max": 4096, "step": 64}), + "overlap": ("INT", {"default": 64, "min": 0, "max": 4096, "step": 32}), + "temporal_size": ("INT", {"default": 64, "min": 8, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to encode at a time."}), + "temporal_overlap": ("INT", {"default": 8, "min": 4, "max": 4096, "step": 4, "tooltip": "Only used for video VAEs: Amount of frames to overlap."}), + }, + "optional": { + "negative": ("CONDITIONING", ), + } + } + + RETURN_TYPES = ("MODEL", "CONDITIONING","CONDITIONING","LATENT") + RETURN_NAMES = ("model", "positive", "negative", "latent") + FUNCTION = "encode" + + CATEGORY = "KJNodes/videomodels" + + def encode(self, model, positive, start_frame, end_frame, num_frames, vae, tile_size, overlap, temporal_size, temporal_overlap, negative=None): + + model_clone = model.clone() + + model_clone.add_object_patch("concat_keys", ("concat_image",)) + + + x = (start_frame.shape[1] // 8) * 8 + y = (start_frame.shape[2] // 8) * 8 + + if start_frame.shape[1] != x or start_frame.shape[2] != y: + x_offset = (start_frame.shape[1] % 8) // 2 + y_offset = (start_frame.shape[2] % 8) // 2 + start_frame = start_frame[:,x_offset:x + x_offset, y_offset:y + y_offset,:] + if end_frame.shape[1] != x or end_frame.shape[2] != y: + x_offset = (start_frame.shape[1] % 8) // 2 + y_offset = (start_frame.shape[2] % 8) // 2 + end_frame = end_frame[:,x_offset:x + x_offset, y_offset:y + y_offset,:] + + video_frames = torch.zeros(num_frames-2, start_frame.shape[1], start_frame.shape[2], start_frame.shape[3], device=start_frame.device, dtype=start_frame.dtype) + video_frames = torch.cat([start_frame, video_frames, end_frame], dim=0) + + concat_latent = vae.encode_tiled(video_frames[:,:,:,:3], tile_x=tile_size, tile_y=tile_size, overlap=overlap, tile_t=temporal_size, overlap_t=temporal_overlap) + + out_latent = {} + out_latent["samples"] = torch.zeros_like(concat_latent) + + out = [] + for conditioning in [positive, negative if negative is not None else []]: + c = [] + for t in conditioning: + d = t[1].copy() + d["concat_latent_image"] = concat_latent + n = [t[0], d] + c.append(n) + out.append(c) + if len(out) == 1: + out.append(out[0]) + return (model_clone, out[0], out[1], out_latent) \ No 
newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/pyproject.toml b/custom_nodes/ComfyUI-KJNodes-main/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..01d387c81092d4cab9a45a18ca971578e498401f --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/pyproject.toml @@ -0,0 +1,15 @@ +[project] +name = "comfyui-kjnodes" +description = "Various quality of life -nodes for ComfyUI, mostly just visual stuff to improve usability." +version = "1.0.8" +license = {file = "LICENSE"} +dependencies = ["librosa", "numpy", "pillow>=10.3.0", "scipy", "color-matcher", "matplotlib", "huggingface_hub"] + +[project.urls] +Repository = "https://github.com/kijai/ComfyUI-KJNodes" +# Used by Comfy Registry https://comfyregistry.org + +[tool.comfy] +PublisherId = "kijai" +DisplayName = "ComfyUI-KJNodes" +Icon = "" diff --git a/custom_nodes/ComfyUI-KJNodes-main/requirements.txt b/custom_nodes/ComfyUI-KJNodes-main/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..5bc18ca95b226298cbb88bd4de3307c157e0a88b --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/requirements.txt @@ -0,0 +1,7 @@ +pillow>=10.3.0 +scipy +color-matcher +matplotlib +huggingface_hub +mss +opencv-python \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/utility/fluid.py b/custom_nodes/ComfyUI-KJNodes-main/utility/fluid.py new file mode 100644 index 0000000000000000000000000000000000000000..c0691987f5249a031ecbb74329ba513d5788b691 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/utility/fluid.py @@ -0,0 +1,67 @@ +import numpy as np +from scipy.ndimage import map_coordinates, spline_filter +from scipy.sparse.linalg import factorized + +from .numerical import difference, operator + + +class Fluid: + def __init__(self, shape, *quantities, pressure_order=1, advect_order=3): + self.shape = shape + self.dimensions = len(shape) + + # Prototyping is simplified by dynamically + # creating advected quantities as needed. + self.quantities = quantities + for q in quantities: + setattr(self, q, np.zeros(shape)) + + self.indices = np.indices(shape) + self.velocity = np.zeros((self.dimensions, *shape)) + + laplacian = operator(shape, difference(2, pressure_order)) + self.pressure_solver = factorized(laplacian) + + self.advect_order = advect_order + + def step(self): + # Advection is computed backwards in time as described in Stable Fluids. + advection_map = self.indices - self.velocity + + # SciPy's spline filter introduces checkerboard divergence. + # A linear blend of the filtered and unfiltered fields based + # on some value epsilon eliminates this error. + def advect(field, filter_epsilon=10e-2, mode='constant'): + filtered = spline_filter(field, order=self.advect_order, mode=mode) + field = filtered * (1 - filter_epsilon) + field * filter_epsilon + return map_coordinates(field, advection_map, prefilter=False, order=self.advect_order, mode=mode) + + # Apply advection to each axis of the + # velocity field and each user-defined quantity. + for d in range(self.dimensions): + self.velocity[d] = advect(self.velocity[d]) + + for q in self.quantities: + setattr(self, q, advect(getattr(self, q))) + + # Compute the jacobian at each point in the + # velocity field to extract curl and divergence. 
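+        # Editor's note (explanatory comment, not in the original file): np.gradient
+        # on each velocity component returns its partial derivatives along every axis,
+        # so after stacking, jacobian[i, j] = d(velocity_i)/d(axis_j). The trace then
+        # gives the divergence, div = d(v_0)/d(x_0) + d(v_1)/d(x_1), and the
+        # upper/lower-triangle difference below gives the scalar curl,
+        # curl = d(v_0)/d(x_1) - d(v_1)/d(x_0). Only the divergence feeds the
+        # pressure solve; the curl is simply returned to the caller.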
+ jacobian_shape = (self.dimensions,) * 2 + partials = tuple(np.gradient(d) for d in self.velocity) + jacobian = np.stack(partials).reshape(*jacobian_shape, *self.shape) + + divergence = jacobian.trace() + + # If this curl calculation is extended to 3D, the y-axis value must be negated. + # This corresponds to the coefficients of the levi-civita symbol in that dimension. + # Higher dimensions do not have a vector -> scalar, or vector -> vector, + # correspondence between velocity and curl due to differing isomorphisms + # between exterior powers in dimensions != 2 or 3 respectively. + curl_mask = np.triu(np.ones(jacobian_shape, dtype=bool), k=1) + curl = (jacobian[curl_mask] - jacobian[curl_mask.T]).squeeze() + + # Apply the pressure correction to the fluid's velocity field. + pressure = self.pressure_solver(divergence.flatten()).reshape(self.shape) + self.velocity -= np.gradient(pressure) + + return divergence, curl, pressure \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/utility/magictex.py b/custom_nodes/ComfyUI-KJNodes-main/utility/magictex.py new file mode 100644 index 0000000000000000000000000000000000000000..e6d426f7deb3deb977604dd37581eb4e9fe9e6a9 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/utility/magictex.py @@ -0,0 +1,95 @@ +"""Generates psychedelic color textures in the spirit of Blender's magic texture shader using Python/Numpy + +https://github.com/cheind/magic-texture +""" +from typing import Tuple, Optional +import numpy as np + + +def coordinate_grid(shape: Tuple[int, int], dtype=np.float32): + """Returns a three-dimensional coordinate grid of given shape for use in `magic`.""" + x = np.linspace(-1, 1, shape[1], endpoint=True, dtype=dtype) + y = np.linspace(-1, 1, shape[0], endpoint=True, dtype=dtype) + X, Y = np.meshgrid(x, y) + XYZ = np.stack((X, Y, np.ones_like(X)), -1) + return XYZ + + +def random_transform(coords: np.ndarray, rng: np.random.Generator = None): + """Returns randomly transformed coordinates""" + H, W = coords.shape[:2] + rng = rng or np.random.default_rng() + m = rng.uniform(-1.0, 1.0, size=(3, 3)).astype(coords.dtype) + return (coords.reshape(-1, 3) @ m.T).reshape(H, W, 3) + + +def magic( + coords: np.ndarray, + depth: Optional[int] = None, + distortion: Optional[int] = None, + rng: np.random.Generator = None, +): + """Returns color magic color texture. + + The implementation is based on Blender's (https://www.blender.org/) magic + texture shader. The following adaptions have been made: + - we exchange the nested if-cascade by a probabilistic iterative approach + + Kwargs + ------ + coords: HxWx3 array + Coordinates transformed into colors by this method. See + `magictex.coordinate_grid` to generate the default. + depth: int (optional) + Number of transformations applied. Higher numbers lead to more + nested patterns. If not specified, randomly sampled. + distortion: float (optional) + Distortion of patterns. Larger values indicate more distortion, + lower values tend to generate smoother patterns. If not specified, + randomly sampled. + rng: np.random.Generator + Optional random generator to draw samples from. 
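+    Note that when `rng` is omitted a fresh np.random.default_rng() is created,
+    so repeated calls yield different textures unless a seeded generator is
+    passed in.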
+ + Returns + ------- + colors: HxWx3 array + Three channel color image in range [0,1] + """ + rng = rng or np.random.default_rng() + if distortion is None: + distortion = rng.uniform(1, 4) + if depth is None: + depth = rng.integers(1, 5) + + H, W = coords.shape[:2] + XYZ = coords + x = np.sin((XYZ[..., 0] + XYZ[..., 1] + XYZ[..., 2]) * distortion) + y = np.cos((-XYZ[..., 0] + XYZ[..., 1] - XYZ[..., 2]) * distortion) + z = -np.cos((-XYZ[..., 0] - XYZ[..., 1] + XYZ[..., 2]) * distortion) + + if depth > 0: + x *= distortion + y *= distortion + z *= distortion + y = -np.cos(x - y + z) + y *= distortion + + xyz = [x, y, z] + fns = [np.cos, np.sin] + for _ in range(1, depth): + axis = rng.choice(3) + fn = fns[rng.choice(2)] + signs = rng.binomial(n=1, p=0.5, size=4) * 2 - 1 + + xyz[axis] = signs[-1] * fn( + signs[0] * xyz[0] + signs[1] * xyz[1] + signs[2] * xyz[2] + ) + xyz[axis] *= distortion + + x, y, z = xyz + x /= 2 * distortion + y /= 2 * distortion + z /= 2 * distortion + c = 0.5 - np.stack((x, y, z), -1) + np.clip(c, 0, 1.0) + return c \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/utility/numerical.py b/custom_nodes/ComfyUI-KJNodes-main/utility/numerical.py new file mode 100644 index 0000000000000000000000000000000000000000..b5b88bc63c45d63d8913e56cbd06eb7ab413fe4f --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/utility/numerical.py @@ -0,0 +1,25 @@ +from functools import reduce +from itertools import cycle +from math import factorial + +import numpy as np +import scipy.sparse as sp + + +def difference(derivative, accuracy=1): + # Central differences implemented based on the article here: + # http://web.media.mit.edu/~crtaylor/calculator.html + derivative += 1 + radius = accuracy + derivative // 2 - 1 + points = range(-radius, radius + 1) + coefficients = np.linalg.inv(np.vander(points)) + return coefficients[-derivative] * factorial(derivative - 1), points + + +def operator(shape, *differences): + # Credit to Philip Zucker for figuring out + # that kronsum's argument order is reversed. + # Without that bit of wisdom I'd have lost it. 
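+    # Editor's note (explanatory comment, not in the original file): difference(2, 1)
+    # returns the central second-difference stencil [1, -2, 1] with offsets (-1, 0, 1).
+    # operator() turns one such 1-D stencil per axis into a banded matrix via
+    # scipy.sparse.diags and combines them with Kronecker sums (the usual identity
+    # L_2D = kron(I, D) + kron(D, I)) to build the N-D Laplacian. fluid.py uses this
+    # for its pressure solver, roughly:
+    #
+    #   laplacian = operator(shape, difference(2, pressure_order))
+    #   pressure_solver = factorized(laplacian)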
+ differences = zip(shape, cycle(differences)) + factors = (sp.diags(*diff, shape=(dim,) * 2) for dim, diff in differences) + return reduce(lambda a, f: sp.kronsum(f, a, format='csc'), factors) \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/utility/utility.py b/custom_nodes/ComfyUI-KJNodes-main/utility/utility.py new file mode 100644 index 0000000000000000000000000000000000000000..f3b5c425922784522791e33c225c29be1e8249e0 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/utility/utility.py @@ -0,0 +1,39 @@ +import torch +import numpy as np +from PIL import Image +from typing import Union, List + +# Utility functions from mtb nodes: https://github.com/melMass/comfy_mtb +def pil2tensor(image: Union[Image.Image, List[Image.Image]]) -> torch.Tensor: + if isinstance(image, list): + return torch.cat([pil2tensor(img) for img in image], dim=0) + + return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) + + +def np2tensor(img_np: Union[np.ndarray, List[np.ndarray]]) -> torch.Tensor: + if isinstance(img_np, list): + return torch.cat([np2tensor(img) for img in img_np], dim=0) + + return torch.from_numpy(img_np.astype(np.float32) / 255.0).unsqueeze(0) + + +def tensor2np(tensor: torch.Tensor): + if len(tensor.shape) == 3: # Single image + return np.clip(255.0 * tensor.cpu().numpy(), 0, 255).astype(np.uint8) + else: # Batch of images + return [np.clip(255.0 * t.cpu().numpy(), 0, 255).astype(np.uint8) for t in tensor] + +def tensor2pil(image: torch.Tensor) -> List[Image.Image]: + batch_count = image.size(0) if len(image.shape) > 3 else 1 + if batch_count > 1: + out = [] + for i in range(batch_count): + out.extend(tensor2pil(image[i])) + return out + + return [ + Image.fromarray( + np.clip(255.0 * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8) + ) + ] \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/web/green.png b/custom_nodes/ComfyUI-KJNodes-main/web/green.png new file mode 100644 index 0000000000000000000000000000000000000000..900964e4b3907145fe1e75a5b58473567450e16d Binary files /dev/null and b/custom_nodes/ComfyUI-KJNodes-main/web/green.png differ diff --git a/custom_nodes/ComfyUI-KJNodes-main/web/js/appearance.js b/custom_nodes/ComfyUI-KJNodes-main/web/js/appearance.js new file mode 100644 index 0000000000000000000000000000000000000000..d90b4aa34d4c52b22a4411194100972c83eed88d --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/web/js/appearance.js @@ -0,0 +1,23 @@ +import { app } from "../../../scripts/app.js"; + +app.registerExtension({ + name: "KJNodes.appearance", + nodeCreated(node) { + switch (node.comfyClass) { + case "INTConstant": + node.setSize([200, 58]); + node.color = "#1b4669"; + node.bgcolor = "#29699c"; + break; + case "FloatConstant": + node.setSize([200, 58]); + node.color = LGraphCanvas.node_colors.green.color; + node.bgcolor = LGraphCanvas.node_colors.green.bgcolor; + break; + case "ConditioningMultiCombine": + node.color = LGraphCanvas.node_colors.brown.color; + node.bgcolor = LGraphCanvas.node_colors.brown.bgcolor; + break; + } + } +}); diff --git a/custom_nodes/ComfyUI-KJNodes-main/web/js/browserstatus.js b/custom_nodes/ComfyUI-KJNodes-main/web/js/browserstatus.js new file mode 100644 index 0000000000000000000000000000000000000000..45abafb163481d9d760c8b273aad4d2a00db1e92 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/web/js/browserstatus.js @@ -0,0 +1,55 @@ +import { api } from "../../../scripts/api.js"; +import { app } from "../../../scripts/app.js"; + +app.registerExtension({ 
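+  // Editor's note (descriptive comment, not in the original file): when the
+  // "KJNodes.browserStatus" setting is enabled, this extension swaps the tab
+  // favicon between green.png and red.png depending on whether the queue still
+  // has work, prefixes the document title with the current progress percentage,
+  // and disables pysssss.FaviconStatus so the two extensions do not compete
+  // over the favicon.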
+ name: "KJNodes.browserstatus", + setup() { + if (!app.ui.settings.getSettingValue("KJNodes.browserStatus")) { + return; + } + api.addEventListener("status", ({ detail }) => { + let title = "ComfyUI"; + let favicon = "green"; + let queueRemaining = detail && detail.exec_info.queue_remaining; + + if (queueRemaining) { + favicon = "red"; + title = `00% - ${queueRemaining} | ${title}`; + } + let link = document.querySelector("link[rel~='icon']"); + if (!link) { + link = document.createElement("link"); + link.rel = "icon"; + document.head.appendChild(link); + } + link.href = new URL(`../${favicon}.png`, import.meta.url); + document.title = title; + }); + //add progress to the title + api.addEventListener("progress", ({ detail }) => { + const { value, max } = detail; + const progress = Math.floor((value / max) * 100); + let title = document.title; + + if (!isNaN(progress) && progress >= 0 && progress <= 100) { + const paddedProgress = String(progress).padStart(2, '0'); + title = `${paddedProgress}% ${title.replace(/^\d+%\s/, '')}`; + } + document.title = title; + }); + }, + init() { + if (!app.ui.settings.getSettingValue("KJNodes.browserStatus")) { + return; + } + const pythongossFeed = app.extensions.find( + (e) => e.name === 'pysssss.FaviconStatus', + ) + if (pythongossFeed) { + console.warn("KJNodes - Overriding pysssss.FaviconStatus") + pythongossFeed.setup = function() { + console.warn("Disabled by KJNodes") + }; + } + }, +}); \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/web/js/contextmenu.js b/custom_nodes/ComfyUI-KJNodes-main/web/js/contextmenu.js new file mode 100644 index 0000000000000000000000000000000000000000..8485658ef819722280160b124a2acf39353bb96d --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/web/js/contextmenu.js @@ -0,0 +1,147 @@ +import { app } from "../../../scripts/app.js"; + +// Adds context menu entries, code partly from pyssssscustom-scripts + +function addMenuHandler(nodeType, cb) { + const getOpts = nodeType.prototype.getExtraMenuOptions; + nodeType.prototype.getExtraMenuOptions = function () { + const r = getOpts.apply(this, arguments); + cb.apply(this, arguments); + return r; + }; +} + +function addNode(name, nextTo, options) { + console.log("name:", name); + console.log("nextTo:", nextTo); + options = { side: "left", select: true, shiftY: 0, shiftX: 0, ...(options || {}) }; + const node = LiteGraph.createNode(name); + app.graph.add(node); + + node.pos = [ + options.side === "left" ? 
nextTo.pos[0] - (node.size[0] + options.offset): nextTo.pos[0] + nextTo.size[0] + options.offset, + + nextTo.pos[1] + options.shiftY, + ]; + if (options.select) { + app.canvas.selectNode(node, false); + } + return node; +} + +app.registerExtension({ + name: "KJNodesContextmenu", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (nodeData.input && nodeData.input.required) { + addMenuHandler(nodeType, function (_, options) { + options.unshift( + { + content: "Add GetNode", + callback: () => {addNode("GetNode", this, { side:"left", offset: 30});} + }, + { + content: "Add SetNode", + callback: () => {addNode("SetNode", this, { side:"right", offset: 30 }); + }, + }); + }); + } + }, + async setup(app) { + const updateSlots = (value) => { + const valuesToAddToIn = ["GetNode"]; + const valuesToAddToOut = ["SetNode"]; + // Remove entries if they exist + for (const arr of Object.values(LiteGraph.slot_types_default_in)) { + for (const valueToAdd of valuesToAddToIn) { + const idx = arr.indexOf(valueToAdd); + if (idx !== -1) { + arr.splice(idx, 1); + } + } + } + + for (const arr of Object.values(LiteGraph.slot_types_default_out)) { + for (const valueToAdd of valuesToAddToOut) { + const idx = arr.indexOf(valueToAdd); + if (idx !== -1) { + arr.splice(idx, 1); + } + } + } + if (value!="disabled") { + for (const arr of Object.values(LiteGraph.slot_types_default_in)) { + for (const valueToAdd of valuesToAddToIn) { + const idx = arr.indexOf(valueToAdd); + if (idx !== -1) { + arr.splice(idx, 1); + } + if (value === "top") { + arr.unshift(valueToAdd); + } else { + arr.push(valueToAdd); + } + } + } + + for (const arr of Object.values(LiteGraph.slot_types_default_out)) { + for (const valueToAdd of valuesToAddToOut) { + const idx = arr.indexOf(valueToAdd); + if (idx !== -1) { + arr.splice(idx, 1); + } + if (value === "top") { + arr.unshift(valueToAdd); + } else { + arr.push(valueToAdd); + } + } + } + } + }; + + app.ui.settings.addSetting({ + id: "KJNodes.SetGetMenu", + name: "KJNodes: Make Set/Get -nodes defaults", + tooltip: 'Adds Set/Get nodes to the top or bottom of the list of available node suggestions.', + options: ['disabled', 'top', 'bottom'], + defaultValue: 'disabled', + type: "combo", + onChange: updateSlots, + + }); + app.ui.settings.addSetting({ + id: "KJNodes.MiddleClickDefault", + name: "KJNodes: Middle click default node adding", + defaultValue: false, + type: "boolean", + onChange: (value) => { + LiteGraph.middle_click_slot_add_default_node = value; + }, + }); + app.ui.settings.addSetting({ + id: "KJNodes.nodeAutoColor", + name: "KJNodes: Automatically set node colors", + type: "boolean", + defaultValue: true, + }); + app.ui.settings.addSetting({ + id: "KJNodes.helpPopup", + name: "KJNodes: Help popups", + defaultValue: true, + type: "boolean", + }); + app.ui.settings.addSetting({ + id: "KJNodes.disablePrefix", + name: "KJNodes: Disable automatic Set_ and Get_ prefix", + defaultValue: true, + type: "boolean", + }); + app.ui.settings.addSetting({ + id: "KJNodes.browserStatus", + name: "KJNodes: 🟒 Stoplight browser status icon πŸ”΄", + defaultValue: false, + type: "boolean", + }); +} +}); diff --git a/custom_nodes/ComfyUI-KJNodes-main/web/js/fast_preview.js b/custom_nodes/ComfyUI-KJNodes-main/web/js/fast_preview.js new file mode 100644 index 0000000000000000000000000000000000000000..822c1f745ba4e895364664f7dbed3d225f0430b2 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/web/js/fast_preview.js @@ -0,0 +1,95 @@ +import { app } from '../../../scripts/app.js' + +//from melmass +export 
function makeUUID() { + let dt = new Date().getTime() + const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => { + const r = ((dt + Math.random() * 16) % 16) | 0 + dt = Math.floor(dt / 16) + return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16) + }) + return uuid +} + +function chainCallback(object, property, callback) { + if (object == undefined) { + //This should not happen. + console.error("Tried to add callback to non-existant object") + return; + } + if (property in object) { + const callback_orig = object[property] + object[property] = function () { + const r = callback_orig.apply(this, arguments); + callback.apply(this, arguments); + return r + }; + } else { + object[property] = callback; + } +} +app.registerExtension({ + name: 'KJNodes.FastPreview', + + async beforeRegisterNodeDef(nodeType, nodeData) { + if (nodeData?.name === 'FastPreview') { + chainCallback(nodeType.prototype, "onNodeCreated", function () { + + var element = document.createElement("div"); + this.uuid = makeUUID() + element.id = `fast-preview-${this.uuid}` + + this.previewWidget = this.addDOMWidget(nodeData.name, "FastPreviewWidget", element, { + serialize: false, + hideOnZoom: false, + }); + + this.previewer = new Previewer(this); + + this.setSize([550, 550]); + this.resizable = false; + this.previewWidget.parentEl = document.createElement("div"); + this.previewWidget.parentEl.className = "fast-preview"; + this.previewWidget.parentEl.id = `fast-preview-${this.uuid}` + element.appendChild(this.previewWidget.parentEl); + + chainCallback(this, "onExecuted", function (message) { + let bg_image = message["bg_image"]; + this.properties.imgData = { + name: "bg_image", + base64: bg_image + }; + this.previewer.refreshBackgroundImage(this); + }); + + + }); // onAfterGraphConfigured + }//node created + } //before register +})//register + +class Previewer { + constructor(context) { + this.node = context; + this.previousWidth = null; + this.previousHeight = null; + } + refreshBackgroundImage = () => { + const imgData = this.node?.properties?.imgData; + if (imgData?.base64) { + const base64String = imgData.base64; + const imageUrl = `data:${imgData.type};base64,${base64String}`; + const img = new Image(); + img.src = imageUrl; + img.onload = () => { + const { width, height } = img; + if (width !== this.previousWidth || height !== this.previousHeight) { + this.node.setSize([width, height]); + this.previousWidth = width; + this.previousHeight = height; + } + this.node.previewWidget.element.style.backgroundImage = `url(${imageUrl})`; + }; + } + }; + } \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/web/js/help_popup.js b/custom_nodes/ComfyUI-KJNodes-main/web/js/help_popup.js new file mode 100644 index 0000000000000000000000000000000000000000..ff9056f7efa41c247f67fed52bf8ae82538afb9f --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/web/js/help_popup.js @@ -0,0 +1,326 @@ +import { app } from "../../../scripts/app.js"; + +// code based on mtb nodes by Mel Massadian https://github.com/melMass/comfy_mtb/ +export const loadScript = ( + FILE_URL, + async = true, + type = 'text/javascript', +) => { + return new Promise((resolve, reject) => { + try { + // Check if the script already exists + const existingScript = document.querySelector(`script[src="${FILE_URL}"]`) + if (existingScript) { + resolve({ status: true, message: 'Script already loaded' }) + return + } + + const scriptEle = document.createElement('script') + scriptEle.type = type + scriptEle.async = async + scriptEle.src = 
FILE_URL + + scriptEle.addEventListener('load', (ev) => { + resolve({ status: true }) + }) + + scriptEle.addEventListener('error', (ev) => { + reject({ + status: false, + message: `Failed to load the script ${FILE_URL}`, + }) + }) + + document.body.appendChild(scriptEle) + } catch (error) { + reject(error) + } + }) +} + +loadScript('/kjweb_async/marked.min.js').catch((e) => { + console.log(e) +}) +loadScript('/kjweb_async/purify.min.js').catch((e) => { + console.log(e) +}) + +const categories = ["KJNodes", "SUPIR", "VoiceCraft", "Marigold", "IC-Light", "WanVideoWrapper"]; +app.registerExtension({ + name: "KJNodes.HelpPopup", + async beforeRegisterNodeDef(nodeType, nodeData) { + + if (app.ui.settings.getSettingValue("KJNodes.helpPopup") === false) { + return; + } + try { + categories.forEach(category => { + if (nodeData?.category?.startsWith(category)) { + addDocumentation(nodeData, nodeType); + } + else return + }); + } catch (error) { + console.error("Error in registering KJNodes.HelpPopup", error); + } + }, +}); + +const create_documentation_stylesheet = () => { + const tag = 'kj-documentation-stylesheet' + + let styleTag = document.head.querySelector(tag) + + if (!styleTag) { + styleTag = document.createElement('style') + styleTag.type = 'text/css' + styleTag.id = tag + styleTag.innerHTML = ` + .kj-documentation-popup { + background: var(--comfy-menu-bg); + position: absolute; + color: var(--fg-color); + font: 12px monospace; + line-height: 1.5em; + padding: 10px; + border-radius: 10px; + border-style: solid; + border-width: medium; + border-color: var(--border-color); + z-index: 5; + overflow: hidden; + } + .content-wrapper { + overflow: auto; + max-height: 100%; + /* Scrollbar styling for Chrome */ + &::-webkit-scrollbar { + width: 6px; + } + &::-webkit-scrollbar-track { + background: var(--bg-color); + } + &::-webkit-scrollbar-thumb { + background-color: var(--fg-color); + border-radius: 6px; + border: 3px solid var(--bg-color); + } + + /* Scrollbar styling for Firefox */ + scrollbar-width: thin; + scrollbar-color: var(--fg-color) var(--bg-color); + a { + color: yellow; + } + a:visited { + color: orange; + } + a:hover { + color: red; + } + } + ` + document.head.appendChild(styleTag) + } + } + + /** Add documentation widget to the selected node */ + export const addDocumentation = ( + nodeData, + nodeType, + opts = { icon_size: 14, icon_margin: 4 },) => { + + opts = opts || {} + const iconSize = opts.icon_size ? opts.icon_size : 14 + const iconMargin = opts.icon_margin ? opts.icon_margin : 4 + let docElement = null + let contentWrapper = null + //if no description in the node python code, don't do anything + if (!nodeData.description) { + return + } + + const drawFg = nodeType.prototype.onDrawForeground + nodeType.prototype.onDrawForeground = function (ctx) { + const r = drawFg ? 
drawFg.apply(this, arguments) : undefined + if (this.flags.collapsed) return r + + // icon position + const x = this.size[0] - iconSize - iconMargin + + // create the popup + if (this.show_doc && docElement === null) { + docElement = document.createElement('div') + contentWrapper = document.createElement('div'); + docElement.appendChild(contentWrapper); + + create_documentation_stylesheet() + contentWrapper.classList.add('content-wrapper'); + docElement.classList.add('kj-documentation-popup') + + //parse the string from the python node code to html with marked, and sanitize the html with DOMPurify + contentWrapper.innerHTML = DOMPurify.sanitize(marked.parse(nodeData.description,)) + + // resize handle + const resizeHandle = document.createElement('div'); + resizeHandle.style.width = '0'; + resizeHandle.style.height = '0'; + resizeHandle.style.position = 'absolute'; + resizeHandle.style.bottom = '0'; + resizeHandle.style.right = '0'; + resizeHandle.style.cursor = 'se-resize'; + + // Add pseudo-elements to create a triangle shape + const borderColor = getComputedStyle(document.documentElement).getPropertyValue('--border-color').trim(); + resizeHandle.style.borderTop = '10px solid transparent'; + resizeHandle.style.borderLeft = '10px solid transparent'; + resizeHandle.style.borderBottom = `10px solid ${borderColor}`; + resizeHandle.style.borderRight = `10px solid ${borderColor}`; + + docElement.appendChild(resizeHandle) + let isResizing = false + let startX, startY, startWidth, startHeight + + resizeHandle.addEventListener('mousedown', function (e) { + e.preventDefault(); + e.stopPropagation(); + isResizing = true; + startX = e.clientX; + startY = e.clientY; + startWidth = parseInt(document.defaultView.getComputedStyle(docElement).width, 10); + startHeight = parseInt(document.defaultView.getComputedStyle(docElement).height, 10); + }, + { signal: this.docCtrl.signal }, + ); + + // close button + const closeButton = document.createElement('div'); + closeButton.textContent = '❌'; + closeButton.style.position = 'absolute'; + closeButton.style.top = '0'; + closeButton.style.right = '0'; + closeButton.style.cursor = 'pointer'; + closeButton.style.padding = '5px'; + closeButton.style.color = 'red'; + closeButton.style.fontSize = '12px'; + + docElement.appendChild(closeButton) + + closeButton.addEventListener('mousedown', (e) => { + e.stopPropagation(); + this.show_doc = !this.show_doc + docElement.parentNode.removeChild(docElement) + docElement = null + if (contentWrapper) { + contentWrapper.remove() + contentWrapper = null + } + }, + { signal: this.docCtrl.signal }, + ); + + document.addEventListener('mousemove', function (e) { + if (!isResizing) return; + const scale = app.canvas.ds.scale; + const newWidth = startWidth + (e.clientX - startX) / scale; + const newHeight = startHeight + (e.clientY - startY) / scale;; + docElement.style.width = `${newWidth}px`; + docElement.style.height = `${newHeight}px`; + }, + { signal: this.docCtrl.signal }, + ); + + document.addEventListener('mouseup', function () { + isResizing = false + }, + { signal: this.docCtrl.signal }, + ) + + document.body.appendChild(docElement) + } + // close the popup + else if (!this.show_doc && docElement !== null) { + docElement.parentNode.removeChild(docElement) + docElement = null + } + // update position of the popup + if (this.show_doc && docElement !== null) { + const rect = ctx.canvas.getBoundingClientRect() + const scaleX = rect.width / ctx.canvas.width + const scaleY = rect.height / ctx.canvas.height + + const transform = 
new DOMMatrix() + .scaleSelf(scaleX, scaleY) + .multiplySelf(ctx.getTransform()) + .translateSelf(this.size[0] * scaleX * Math.max(1.0,window.devicePixelRatio) , 0) + .translateSelf(10, -32) + + const scale = new DOMMatrix() + .scaleSelf(transform.a, transform.d); + const bcr = app.canvas.canvas.getBoundingClientRect() + + const styleObject = { + transformOrigin: '0 0', + transform: scale, + left: `${transform.a + bcr.x + transform.e}px`, + top: `${transform.d + bcr.y + transform.f}px`, + }; + Object.assign(docElement.style, styleObject); + } + + ctx.save() + ctx.translate(x - 2, iconSize - 34) + ctx.scale(iconSize / 32, iconSize / 32) + ctx.strokeStyle = 'rgba(255,255,255,0.3)' + ctx.lineCap = 'round' + ctx.lineJoin = 'round' + ctx.lineWidth = 2.4 + ctx.font = 'bold 36px monospace' + ctx.fillStyle = 'orange'; + ctx.fillText('?', 0, 24) + ctx.restore() + return r + } + // handle clicking of the icon + const mouseDown = nodeType.prototype.onMouseDown + nodeType.prototype.onMouseDown = function (e, localPos, canvas) { + const r = mouseDown ? mouseDown.apply(this, arguments) : undefined + const iconX = this.size[0] - iconSize - iconMargin + const iconY = iconSize - 34 + if ( + localPos[0] > iconX && + localPos[0] < iconX + iconSize && + localPos[1] > iconY && + localPos[1] < iconY + iconSize + ) { + if (this.show_doc === undefined) { + this.show_doc = true + } else { + this.show_doc = !this.show_doc + } + if (this.show_doc) { + this.docCtrl = new AbortController() + } else { + this.docCtrl.abort() + } + return true; + } + return r; + } + const onRem = nodeType.prototype.onRemoved + + nodeType.prototype.onRemoved = function () { + const r = onRem ? onRem.apply(this, []) : undefined + + if (docElement) { + docElement.remove() + docElement = null + } + + if (contentWrapper) { + contentWrapper.remove() + contentWrapper = null + } + return r + } +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/web/js/jsnodes.js b/custom_nodes/ComfyUI-KJNodes-main/web/js/jsnodes.js new file mode 100644 index 0000000000000000000000000000000000000000..2050ea1d9072f2e1144b24893018938296184275 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/web/js/jsnodes.js @@ -0,0 +1,374 @@ +import { app } from "../../../scripts/app.js"; +import { applyTextReplacements } from "../../../scripts/utils.js"; + +app.registerExtension({ + name: "KJNodes.jsnodes", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if(!nodeData?.category?.startsWith("KJNodes")) { + return; + } + switch (nodeData.name) { + case "ConditioningMultiCombine": + nodeType.prototype.onNodeCreated = function () { + this.cond_type = "CONDITIONING" + this.inputs_offset = nodeData.name.includes("selective")?1:0 + this.addWidget("button", "Update inputs", null, () => { + if (!this.inputs) { + this.inputs = []; + } + const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"]; + if(target_number_of_inputs===this.inputs.length)return; // already set, do nothing + + if(target_number_of_inputs < this.inputs.length){ + for(let i = this.inputs.length; i>=this.inputs_offset+target_number_of_inputs; i--) + this.removeInput(i) + } + else{ + for(let i = this.inputs.length+1-this.inputs_offset; i <= target_number_of_inputs; ++i) + this.addInput(`conditioning_${i}`, this.cond_type) + } + }); + } + break; + case "ImageBatchMulti": + case "ImageAddMulti": + case "ImageConcatMulti": + case "CrossFadeImagesMulti": + case "TransitionImagesMulti": + nodeType.prototype.onNodeCreated = function () { + this._type = 
"IMAGE" + this.inputs_offset = nodeData.name.includes("selective")?1:0 + this.addWidget("button", "Update inputs", null, () => { + if (!this.inputs) { + this.inputs = []; + } + const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"]; + if(target_number_of_inputs===this.inputs.length)return; // already set, do nothing + + if(target_number_of_inputs < this.inputs.length){ + for(let i = this.inputs.length; i>=this.inputs_offset+target_number_of_inputs; i--) + this.removeInput(i) + } + else{ + for(let i = this.inputs.length+1-this.inputs_offset; i <= target_number_of_inputs; ++i) + this.addInput(`image_${i}`, this._type) + } + }); + } + break; + case "MaskBatchMulti": + nodeType.prototype.onNodeCreated = function () { + this._type = "MASK" + this.inputs_offset = nodeData.name.includes("selective")?1:0 + this.addWidget("button", "Update inputs", null, () => { + if (!this.inputs) { + this.inputs = []; + } + const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"]; + if(target_number_of_inputs===this.inputs.length)return; // already set, do nothing + + if(target_number_of_inputs < this.inputs.length){ + for(let i = this.inputs.length; i>=this.inputs_offset+target_number_of_inputs; i--) + this.removeInput(i) + } + else{ + for(let i = this.inputs.length+1-this.inputs_offset; i <= target_number_of_inputs; ++i) + this.addInput(`mask_${i}`, this._type) + } + }); + } + break; + + case "FluxBlockLoraSelect": + case "HunyuanVideoBlockLoraSelect": + nodeType.prototype.onNodeCreated = function () { + this.addWidget("button", "Set all", null, () => { + const userInput = prompt("Enter the values to set for widgets (e.g., s0,1,2-7=2.0, d0,1,2-7=2.0, or 1.0):", ""); + if (userInput) { + const regex = /([sd])?(\d+(?:,\d+|-?\d+)*?)?=(\d+(\.\d+)?)/; + const match = userInput.match(regex); + if (match) { + const type = match[1]; + const indicesPart = match[2]; + const value = parseFloat(match[3]); + + let targetWidgets = []; + if (type === 's') { + targetWidgets = this.widgets.filter(widget => widget.name.includes("single")); + } else if (type === 'd') { + targetWidgets = this.widgets.filter(widget => widget.name.includes("double")); + } else { + targetWidgets = this.widgets; // No type specified, all widgets + } + + if (indicesPart) { + const indices = indicesPart.split(',').flatMap(part => { + if (part.includes('-')) { + const [start, end] = part.split('-').map(Number); + return Array.from({ length: end - start + 1 }, (_, i) => start + i); + } + return Number(part); + }); + + for (const index of indices) { + if (index < targetWidgets.length) { + targetWidgets[index].value = value; + } + } + } else { + // No indices provided, set value for all target widgets + for (const widget of targetWidgets) { + widget.value = value; + } + } + } else if (!isNaN(parseFloat(userInput))) { + // Single value provided, set it for all widgets + const value = parseFloat(userInput); + for (const widget of this.widgets) { + widget.value = value; + } + } else { + alert("Invalid input format. Please use the format s0,1,2-7=2.0, d0,1,2-7=2.0, or 1.0"); + } + } else { + alert("Invalid input. Please enter a value."); + } + }); + }; + break; + + case "GetMaskSizeAndCount": + const onGetMaskSizeConnectInput = nodeType.prototype.onConnectInput; + nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) { + const v = onGetMaskSizeConnectInput? 
onGetMaskSizeConnectInput.apply(this, arguments): undefined + this.outputs[1]["label"] = "width" + this.outputs[2]["label"] = "height" + this.outputs[3]["label"] = "count" + return v; + } + const onGetMaskSizeExecuted = nodeType.prototype.onAfterExecuteNode; + nodeType.prototype.onExecuted = function(message) { + const r = onGetMaskSizeExecuted? onGetMaskSizeExecuted.apply(this,arguments): undefined + let values = message["text"].toString().split('x').map(Number); + this.outputs[1]["label"] = values[1] + " width" + this.outputs[2]["label"] = values[2] + " height" + this.outputs[3]["label"] = values[0] + " count" + return r + } + break; + + case "GetImageSizeAndCount": + const onGetImageSizeConnectInput = nodeType.prototype.onConnectInput; + nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) { + console.log(this) + const v = onGetImageSizeConnectInput? onGetImageSizeConnectInput.apply(this, arguments): undefined + //console.log(this) + this.outputs[1]["label"] = "width" + this.outputs[2]["label"] = "height" + this.outputs[3]["label"] = "count" + return v; + } + //const onGetImageSizeExecuted = nodeType.prototype.onExecuted; + const onGetImageSizeExecuted = nodeType.prototype.onAfterExecuteNode; + nodeType.prototype.onExecuted = function(message) { + console.log(this) + const r = onGetImageSizeExecuted? onGetImageSizeExecuted.apply(this,arguments): undefined + let values = message["text"].toString().split('x').map(Number); + console.log(values) + this.outputs[1]["label"] = values[1] + " width" + this.outputs[2]["label"] = values[2] + " height" + this.outputs[3]["label"] = values[0] + " count" + return r + } + break; + + case "PreviewAnimation": + const onPreviewAnimationConnectInput = nodeType.prototype.onConnectInput; + nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) { + const v = onPreviewAnimationConnectInput? onPreviewAnimationConnectInput.apply(this, arguments): undefined + this.title = "Preview Animation" + return v; + } + const onPreviewAnimationExecuted = nodeType.prototype.onAfterExecuteNode; + nodeType.prototype.onExecuted = function(message) { + const r = onPreviewAnimationExecuted? onPreviewAnimationExecuted.apply(this,arguments): undefined + let values = message["text"].toString(); + this.title = "Preview Animation " + values + return r + } + break; + + case "VRAM_Debug": + const onVRAM_DebugConnectInput = nodeType.prototype.onConnectInput; + nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) { + const v = onVRAM_DebugConnectInput? onVRAM_DebugConnectInput.apply(this, arguments): undefined + this.outputs[3]["label"] = "freemem_before" + this.outputs[4]["label"] = "freemem_after" + return v; + } + const onVRAM_DebugExecuted = nodeType.prototype.onAfterExecuteNode; + nodeType.prototype.onExecuted = function(message) { + const r = onVRAM_DebugExecuted? onVRAM_DebugExecuted.apply(this,arguments): undefined + let values = message["text"].toString().split('x'); + this.outputs[3]["label"] = values[0] + " freemem_before" + this.outputs[4]["label"] = values[1] + " freemem_after" + return r + } + break; + + case "JoinStringMulti": + const originalOnNodeCreated = nodeType.prototype.onNodeCreated || function() {}; + nodeType.prototype.onNodeCreated = function () { + originalOnNodeCreated.apply(this, arguments); + + this._type = "STRING"; + this.inputs_offset = nodeData.name.includes("selective") ? 
1 : 0; + this.addWidget("button", "Update inputs", null, () => { + if (!this.inputs) { + this.inputs = []; + } + const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"]; + if (target_number_of_inputs === this.inputs.length) return; // already set, do nothing + + if (target_number_of_inputs < this.inputs.length) { + for (let i = this.inputs.length; i >= this.inputs_offset + target_number_of_inputs; i--) + this.removeInput(i); + } else { + for (let i = this.inputs.length + 1 - this.inputs_offset; i <= target_number_of_inputs; ++i) + this.addInput(`string_${i}`, this._type); + } + }); + } + break; + case "SoundReactive": + nodeType.prototype.onNodeCreated = function () { + let audioContext; + let microphoneStream; + let animationFrameId; + let analyser; + let dataArray; + let startRangeHz; + let endRangeHz; + let smoothingFactor = 0.5; + let smoothedSoundLevel = 0; + + // Function to update the widget value in real-time + const updateWidgetValueInRealTime = () => { + // Ensure analyser and dataArray are defined before using them + if (analyser && dataArray) { + analyser.getByteFrequencyData(dataArray); + + const startRangeHzWidget = this.widgets.find(w => w.name === "start_range_hz"); + if (startRangeHzWidget) startRangeHz = startRangeHzWidget.value; + const endRangeHzWidget = this.widgets.find(w => w.name === "end_range_hz"); + if (endRangeHzWidget) endRangeHz = endRangeHzWidget.value; + const smoothingFactorWidget = this.widgets.find(w => w.name === "smoothing_factor"); + if (smoothingFactorWidget) smoothingFactor = smoothingFactorWidget.value; + + // Calculate frequency bin width (frequency resolution) + const frequencyBinWidth = audioContext.sampleRate / analyser.fftSize; + // Convert the widget values from Hz to indices + const startRangeIndex = Math.floor(startRangeHz / frequencyBinWidth); + const endRangeIndex = Math.floor(endRangeHz / frequencyBinWidth); + + // Function to calculate the average value for a frequency range + const calculateAverage = (start, end) => { + const sum = dataArray.slice(start, end).reduce((acc, val) => acc + val, 0); + const average = sum / (end - start); + + // Apply exponential moving average smoothing + smoothedSoundLevel = (average * (1 - smoothingFactor)) + (smoothedSoundLevel * smoothingFactor); + return smoothedSoundLevel; + }; + // Calculate the average levels for each frequency range + const soundLevel = calculateAverage(startRangeIndex, endRangeIndex); + + // Update the widget values + + const lowLevelWidget = this.widgets.find(w => w.name === "sound_level"); + if (lowLevelWidget) lowLevelWidget.value = soundLevel; + + animationFrameId = requestAnimationFrame(updateWidgetValueInRealTime); + } + }; + + // Function to start capturing audio from the microphone + const startMicrophoneCapture = () => { + // Only create the audio context and analyser once + if (!audioContext) { + audioContext = new (window.AudioContext || window.webkitAudioContext)(); + // Access the sample rate of the audio context + console.log(`Sample rate: ${audioContext.sampleRate}Hz`); + analyser = audioContext.createAnalyser(); + analyser.fftSize = 2048; + dataArray = new Uint8Array(analyser.frequencyBinCount); + // Get the range values from widgets (assumed to be in Hz) + const lowRangeWidget = this.widgets.find(w => w.name === "low_range_hz"); + if (lowRangeWidget) startRangeHz = lowRangeWidget.value; + + const midRangeWidget = this.widgets.find(w => w.name === "mid_range_hz"); + if (midRangeWidget) endRangeHz = midRangeWidget.value; + } + + 
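+        // For reference, a minimal sketch of the same mapping used above, assuming a
+        // 48 kHz AudioContext and the fftSize of 2048 set here (each FFT bin then
+        // spans 48000 / 2048 ≈ 23.4 Hz):
+        //   const hzToBin = (hz, sampleRate = 48000, fftSize = 2048) =>
+        //     Math.floor(hz / (sampleRate / fftSize));
+        //   hzToBin(150);                        // -> bin 6
+        //   const ema = (avg, prev, factor = 0.5) =>
+        //     avg * (1 - factor) + prev * factor; // same smoothing as above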
navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => { + microphoneStream = stream; + const microphone = audioContext.createMediaStreamSource(stream); + microphone.connect(analyser); + updateWidgetValueInRealTime(); + }).catch(error => { + console.error('Access to microphone was denied or an error occurred:', error); + }); + }; + + // Function to stop capturing audio from the microphone + const stopMicrophoneCapture = () => { + if (animationFrameId) { + cancelAnimationFrame(animationFrameId); + } + if (microphoneStream) { + microphoneStream.getTracks().forEach(track => track.stop()); + } + if (audioContext) { + audioContext.close(); + // Reset audioContext to ensure it can be created again when starting + audioContext = null; + } + }; + + // Add start button + this.addWidget("button", "Start mic capture", null, startMicrophoneCapture); + + // Add stop button + this.addWidget("button", "Stop mic capture", null, stopMicrophoneCapture); + }; + break; + case "SaveImageKJ": + const onNodeCreated = nodeType.prototype.onNodeCreated; + nodeType.prototype.onNodeCreated = function() { + const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : void 0; + const widget = this.widgets.find((w) => w.name === "filename_prefix"); + widget.serializeValue = () => { + return applyTextReplacements(app, widget.value); + }; + return r; + }; + break; + + } + + }, + async setup() { + // to keep Set/Get node virtual connections visible when offscreen + const originalComputeVisibleNodes = LGraphCanvas.prototype.computeVisibleNodes; + LGraphCanvas.prototype.computeVisibleNodes = function () { + const visibleNodesSet = new Set(originalComputeVisibleNodes.apply(this, arguments)); + for (const node of this.graph._nodes) { + if ((node.type === "SetNode" || node.type === "GetNode") && node.drawConnection) { + visibleNodesSet.add(node); + } + } + return Array.from(visibleNodesSet); + }; + + } +}); \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/web/js/point_editor.js b/custom_nodes/ComfyUI-KJNodes-main/web/js/point_editor.js new file mode 100644 index 0000000000000000000000000000000000000000..210591fa51d660296e28e29e37183757eaee05bb --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/web/js/point_editor.js @@ -0,0 +1,736 @@ +import { app } from '../../../scripts/app.js' + +//from melmass +export function makeUUID() { + let dt = new Date().getTime() + const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => { + const r = ((dt + Math.random() * 16) % 16) | 0 + dt = Math.floor(dt / 16) + return (c === 'x' ? 
r : (r & 0x3) | 0x8).toString(16) + }) + return uuid +} + +export const loadScript = ( + FILE_URL, + async = true, + type = 'text/javascript', +) => { + return new Promise((resolve, reject) => { + try { + // Check if the script already exists + const existingScript = document.querySelector(`script[src="${FILE_URL}"]`) + if (existingScript) { + resolve({ status: true, message: 'Script already loaded' }) + return + } + + const scriptEle = document.createElement('script') + scriptEle.type = type + scriptEle.async = async + scriptEle.src = FILE_URL + + scriptEle.addEventListener('load', (ev) => { + resolve({ status: true }) + }) + + scriptEle.addEventListener('error', (ev) => { + reject({ + status: false, + message: `Failed to load the script ${FILE_URL}`, + }) + }) + + document.body.appendChild(scriptEle) + } catch (error) { + reject(error) + } + }) +} +const create_documentation_stylesheet = () => { + const tag = 'kj-pointseditor-stylesheet' + + let styleTag = document.head.querySelector(tag) + + if (!styleTag) { + styleTag = document.createElement('style') + styleTag.type = 'text/css' + styleTag.id = tag + styleTag.innerHTML = ` + .points-editor { + + position: absolute; + + font: 12px monospace; + line-height: 1.5em; + padding: 10px; + z-index: 0; + overflow: hidden; + } + ` + document.head.appendChild(styleTag) + } +} + +loadScript('/kjweb_async/svg-path-properties.min.js').catch((e) => { + console.log(e) +}) +loadScript('/kjweb_async/protovis.min.js').catch((e) => { + console.log(e) +}) +create_documentation_stylesheet() + +function chainCallback(object, property, callback) { + if (object == undefined) { + //This should not happen. + console.error("Tried to add callback to non-existant object") + return; + } + if (property in object) { + const callback_orig = object[property] + object[property] = function () { + const r = callback_orig.apply(this, arguments); + callback.apply(this, arguments); + return r + }; + } else { + object[property] = callback; + } +} +app.registerExtension({ + name: 'KJNodes.PointEditor', + + async beforeRegisterNodeDef(nodeType, nodeData) { + if (nodeData?.name === 'PointsEditor') { + chainCallback(nodeType.prototype, "onNodeCreated", function () { + + hideWidgetForGood(this, this.widgets.find(w => w.name === "coordinates")) + hideWidgetForGood(this, this.widgets.find(w => w.name === "neg_coordinates")) + hideWidgetForGood(this, this.widgets.find(w => w.name === "bboxes")) + + var element = document.createElement("div"); + this.uuid = makeUUID() + element.id = `points-editor-${this.uuid}` + + // fake image widget to allow copy/paste + const fakeimagewidget = this.addWidget("COMBO", "image", null, () => { }, {}); + hideWidgetForGood(this, fakeimagewidget) + + this.pointsEditor = this.addDOMWidget(nodeData.name, "PointsEditorWidget", element, { + serialize: false, + hideOnZoom: false, + }); + + // context menu + this.contextMenu = document.createElement("div"); + this.contextMenu.id = "context-menu"; + this.contextMenu.style.display = "none"; + this.contextMenu.style.position = "absolute"; + this.contextMenu.style.backgroundColor = "#202020"; + this.contextMenu.style.minWidth = "100px"; + this.contextMenu.style.boxShadow = "0px 8px 16px 0px rgba(0,0,0,0.2)"; + this.contextMenu.style.zIndex = "100"; + this.contextMenu.style.padding = "5px"; + + function styleMenuItem(menuItem) { + menuItem.style.display = "block"; + menuItem.style.padding = "5px"; + menuItem.style.color = "#FFF"; + menuItem.style.fontFamily = "Arial, sans-serif"; + menuItem.style.fontSize = "16px"; 
+ menuItem.style.textDecoration = "none"; + menuItem.style.marginBottom = "5px"; + } + function createMenuItem(id, textContent) { + let menuItem = document.createElement("a"); + menuItem.href = "#"; + menuItem.id = `menu-item-${id}`; + menuItem.textContent = textContent; + styleMenuItem(menuItem); + return menuItem; + } + + // Create an array of menu items using the createMenuItem function + this.menuItems = [ + createMenuItem(0, "Load Image"), + createMenuItem(1, "Clear Image"), + ]; + + // Add mouseover and mouseout event listeners to each menu item for styling + this.menuItems.forEach(menuItem => { + menuItem.addEventListener('mouseover', function () { + this.style.backgroundColor = "gray"; + }); + + menuItem.addEventListener('mouseout', function () { + this.style.backgroundColor = "#202020"; + }); + }); + + // Append each menu item to the context menu + this.menuItems.forEach(menuItem => { + this.contextMenu.appendChild(menuItem); + }); + + document.body.appendChild(this.contextMenu); + + this.addWidget("button", "New canvas", null, () => { + if (!this.properties || !("points" in this.properties)) { + this.editor = new PointsEditor(this); + this.addProperty("points", this.constructor.type, "string"); + this.addProperty("neg_points", this.constructor.type, "string"); + + } + else { + this.editor = new PointsEditor(this, true); + } + }); + + this.setSize([550, 550]); + this.resizable = false; + this.pointsEditor.parentEl = document.createElement("div"); + this.pointsEditor.parentEl.className = "points-editor"; + this.pointsEditor.parentEl.id = `points-editor-${this.uuid}` + element.appendChild(this.pointsEditor.parentEl); + + chainCallback(this, "onConfigure", function () { + try { + this.editor = new PointsEditor(this); + } catch (error) { + console.error("An error occurred while configuring the editor:", error); + } + }); + chainCallback(this, "onExecuted", function (message) { + let bg_image = message["bg_image"]; + this.properties.imgData = { + name: "bg_image", + base64: bg_image + }; + this.editor.refreshBackgroundImage(this); + }); + + }); // onAfterGraphConfigured + }//node created + } //before register +})//register + +class PointsEditor { + constructor(context, reset = false) { + this.node = context; + this.reset = reset; + const self = this; // Keep a reference to the main class context + + console.log("creatingPointEditor") + + this.node.pasteFile = (file) => { + if (file.type.startsWith("image/")) { + this.handleImageFile(file); + return true; + } + return false; + }; + + this.node.onDragOver = function (e) { + if (e.dataTransfer && e.dataTransfer.items) { + return [...e.dataTransfer.items].some(f => f.kind === "file" && f.type.startsWith("image/")); + } + return false; + }; + + // On drop upload files + this.node.onDragDrop = (e) => { + console.log("onDragDrop called"); + let handled = false; + for (const file of e.dataTransfer.files) { + if (file.type.startsWith("image/")) { + this.handleImageFile(file); + handled = true; + } + } + return handled; + }; + + // context menu + this.createContextMenu(); + + if (reset && context.pointsEditor.element) { + context.pointsEditor.element.innerHTML = ''; // Clear the container + } + this.pos_coordWidget = context.widgets.find(w => w.name === "coordinates"); + this.neg_coordWidget = context.widgets.find(w => w.name === "neg_coordinates"); + this.pointsStoreWidget = context.widgets.find(w => w.name === "points_store"); + this.widthWidget = context.widgets.find(w => w.name === "width"); + this.heightWidget = context.widgets.find(w => 
w.name === "height"); + this.bboxStoreWidget = context.widgets.find(w => w.name === "bbox_store"); + this.bboxWidget = context.widgets.find(w => w.name === "bboxes"); + + //widget callbacks + this.widthWidget.callback = () => { + this.width = this.widthWidget.value; + if (this.width > 256) { + context.setSize([this.width + 45, context.size[1]]); + } + this.vis.width(this.width); + this.updateData(); + } + this.heightWidget.callback = () => { + this.height = this.heightWidget.value + this.vis.height(this.height) + context.setSize([context.size[0], this.height + 300]); + this.updateData(); + } + this.pointsStoreWidget.callback = () => { + this.points = JSON.parse(pointsStoreWidget.value).positive; + this.neg_points = JSON.parse(pointsStoreWidget.value).negative; + this.updateData(); + } + this.bboxStoreWidget.callback = () => { + this.bbox = JSON.parse(bboxStoreWidget.value) + this.updateData(); + } + + this.width = this.widthWidget.value; + this.height = this.heightWidget.value; + var i = 3; + this.points = []; + this.neg_points = []; + this.bbox = [{}]; + var drawing = false; + + // Initialize or reset points array + if (!reset && this.pointsStoreWidget.value != "") { + this.points = JSON.parse(this.pointsStoreWidget.value).positive; + this.neg_points = JSON.parse(this.pointsStoreWidget.value).negative; + this.bbox = JSON.parse(this.bboxStoreWidget.value); + console.log(this.bbox) + } else { + this.points = [ + { + x: this.width / 2, // Middle point horizontally centered + y: this.height / 2 // Middle point vertically centered + } + ]; + this.neg_points = [ + { + x: 0, // Middle point horizontally centered + y: 0 // Middle point vertically centered + } + ]; + const combinedPoints = { + positive: this.points, + negative: this.neg_points, + }; + this.pointsStoreWidget.value = JSON.stringify(combinedPoints); + this.bboxStoreWidget.value = JSON.stringify(this.bbox); + } + + //create main canvas panel + this.vis = new pv.Panel() + .width(this.width) + .height(this.height) + .fillStyle("#222") + .strokeStyle("gray") + .lineWidth(2) + .antialias(false) + .margin(10) + .event("mousedown", function () { + if (pv.event.shiftKey && pv.event.button === 2) { // Use pv.event to access the event object + let scaledMouse = { + x: this.mouse().x / app.canvas.ds.scale, + y: this.mouse().y / app.canvas.ds.scale + }; + i = self.neg_points.push(scaledMouse) - 1; + self.updateData(); + return this; + } + else if (pv.event.shiftKey) { + let scaledMouse = { + x: this.mouse().x / app.canvas.ds.scale, + y: this.mouse().y / app.canvas.ds.scale + }; + i = self.points.push(scaledMouse) - 1; + self.updateData(); + return this; + } + else if (pv.event.ctrlKey) { + console.log("start drawing at " + this.mouse().x / app.canvas.ds.scale + ", " + this.mouse().y / app.canvas.ds.scale); + drawing = true; + self.bbox[0].startX = this.mouse().x / app.canvas.ds.scale; + self.bbox[0].startY = this.mouse().y / app.canvas.ds.scale; + } + else if (pv.event.button === 2) { + self.node.contextMenu.style.display = 'block'; + self.node.contextMenu.style.left = `${pv.event.clientX}px`; + self.node.contextMenu.style.top = `${pv.event.clientY}px`; + } + }) + .event("mousemove", function () { + if (drawing) { + self.bbox[0].endX = this.mouse().x / app.canvas.ds.scale; + self.bbox[0].endY = this.mouse().y / app.canvas.ds.scale; + self.vis.render(); + } + }) + .event("mouseup", function () { + console.log("end drawing at " + this.mouse().x / app.canvas.ds.scale + ", " + this.mouse().y / app.canvas.ds.scale); + drawing = false; + 
self.updateData(); + }); + + this.backgroundImage = this.vis.add(pv.Image).visible(false) + + //create bounding box + this.bounding_box = this.vis.add(pv.Area) + .data(function () { + if (drawing || (self.bbox && self.bbox[0] && Object.keys(self.bbox[0]).length > 0)) { + return [self.bbox[0].startX, self.bbox[0].endX]; + } else { + return []; + } + }) + .bottom(function () {return self.height - Math.max(self.bbox[0].startY, self.bbox[0].endY); }) + .left(function (d) {return d; }) + .height(function () {return Math.abs(self.bbox[0].startY - self.bbox[0].endY);}) + .fillStyle("rgba(70, 130, 180, 0.5)") + .strokeStyle("steelblue") + .visible(function () {return drawing || Object.keys(self.bbox[0]).length > 0; }) + .add(pv.Dot) + .visible(function () {return drawing || Object.keys(self.bbox[0]).length > 0; }) + .data(() => { + if (self.bbox && Object.keys(self.bbox[0]).length > 0) { + return [{ + x: self.bbox[0].endX, + y: self.bbox[0].endY + }]; + } else { + return []; + } + }) + .left(d => d.x) + .top(d => d.y) + .radius(Math.log(Math.min(self.width, self.height)) * 1) + .shape("square") + .cursor("move") + .strokeStyle("steelblue") + .lineWidth(2) + .fillStyle(function () { return "rgba(100, 100, 100, 0.6)"; }) + .event("mousedown", pv.Behavior.drag()) + .event("drag", function () { + let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new position by the inverse of the scale factor + let adjustedY = this.mouse().y / app.canvas.ds.scale; + + // Adjust the new position if it would place the dot outside the bounds of the vis.Panel + adjustedX = Math.max(0, Math.min(self.vis.width(), adjustedX)); + adjustedY = Math.max(0, Math.min(self.vis.height(), adjustedY)); + self.bbox[0].endX = this.mouse().x / app.canvas.ds.scale; + self.bbox[0].endY = this.mouse().y / app.canvas.ds.scale; + self.vis.render(); + }) + .event("dragend", function () { + self.updateData(); + }); + + //create positive points + this.vis.add(pv.Dot) + .data(() => this.points) + .left(d => d.x) + .top(d => d.y) + .radius(Math.log(Math.min(self.width, self.height)) * 4) + .shape("circle") + .cursor("move") + .strokeStyle(function () { return i == this.index ? "#07f907" : "#139613"; }) + .lineWidth(4) + .fillStyle(function () { return "rgba(100, 100, 100, 0.6)"; }) + .event("mousedown", pv.Behavior.drag()) + .event("dragstart", function () { + i = this.index; + }) + .event("dragend", function () { + if (pv.event.button === 2 && i !== 0 && i !== self.points.length - 1) { + this.index = i; + self.points.splice(i--, 1); + } + self.updateData(); + + }) + .event("drag", function () { + let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new X position by the inverse of the scale factor + let adjustedY = this.mouse().y / app.canvas.ds.scale; // Adjust the new Y position by the inverse of the scale factor + // Determine the bounds of the vis.Panel + const panelWidth = self.vis.width(); + const panelHeight = self.vis.height(); + + // Adjust the new position if it would place the dot outside the bounds of the vis.Panel + adjustedX = Math.max(0, Math.min(panelWidth, adjustedX)); + adjustedY = Math.max(0, Math.min(panelHeight, adjustedY)); + self.points[this.index] = { x: adjustedX, y: adjustedY }; // Update the point's position + self.vis.render(); // Re-render the visualization to reflect the new position + }) + + .anchor("center") + .add(pv.Label) + .left(d => d.x < this.width / 2 ? d.x + 30 : d.x - 35) // Shift label to right if on left half, otherwise shift to left + .top(d => d.y < this.height / 2 ? 
d.y + 25 : d.y - 25) // Shift label down if on top half, otherwise shift up + .font(25 + "px sans-serif") + .text(d => {return this.points.indexOf(d); }) + .textStyle("#139613") + .textShadow("2px 2px 2px black") + .add(pv.Dot) // Add smaller point in the center + .data(() => this.points) + .left(d => d.x) + .top(d => d.y) + .radius(2) // Smaller radius for the center point + .shape("circle") + .fillStyle("red") // Color for the center point + .lineWidth(1); // Stroke thickness for the center point + + //create negative points + this.vis.add(pv.Dot) + .data(() => this.neg_points) + .left(d => d.x) + .top(d => d.y) + .radius(Math.log(Math.min(self.width, self.height)) * 4) + .shape("circle") + .cursor("move") + .strokeStyle(function () { return i == this.index ? "#f91111" : "#891616"; }) + .lineWidth(4) + .fillStyle(function () { return "rgba(100, 100, 100, 0.6)"; }) + .event("mousedown", pv.Behavior.drag()) + .event("dragstart", function () { + i = this.index; + }) + .event("dragend", function () { + if (pv.event.button === 2 && i !== 0 && i !== self.neg_points.length - 1) { + this.index = i; + self.neg_points.splice(i--, 1); + } + self.updateData(); + + }) + .event("drag", function () { + let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new X position by the inverse of the scale factor + let adjustedY = this.mouse().y / app.canvas.ds.scale; // Adjust the new Y position by the inverse of the scale factor + // Determine the bounds of the vis.Panel + const panelWidth = self.vis.width(); + const panelHeight = self.vis.height(); + + // Adjust the new position if it would place the dot outside the bounds of the vis.Panel + adjustedX = Math.max(0, Math.min(panelWidth, adjustedX)); + adjustedY = Math.max(0, Math.min(panelHeight, adjustedY)); + self.neg_points[this.index] = { x: adjustedX, y: adjustedY }; // Update the point's position + self.vis.render(); // Re-render the visualization to reflect the new position + }) + .anchor("center") + .add(pv.Label) + .left(d => d.x < this.width / 2 ? d.x + 30 : d.x - 35) // Shift label to right if on left half, otherwise shift to left + .top(d => d.y < this.height / 2 ? 
d.y + 25 : d.y - 25) // Shift label down if on top half, otherwise shift up + .font(25 + "px sans-serif") + .text(d => {return this.neg_points.indexOf(d); }) + .textStyle("red") + .textShadow("2px 2px 2px black") + .add(pv.Dot) // Add smaller point in the center + .data(() => this.neg_points) + .left(d => d.x) + .top(d => d.y) + .radius(2) // Smaller radius for the center point + .shape("circle") + .fillStyle("red") // Color for the center point + .lineWidth(1); // Stroke thickness for the center point + + if (this.points.length != 0) { + this.vis.render(); + } + + var svgElement = this.vis.canvas(); + svgElement.style['zIndex'] = "2" + svgElement.style['position'] = "relative" + this.node.pointsEditor.element.appendChild(svgElement); + + if (this.width > 256) { + this.node.setSize([this.width + 45, this.node.size[1]]); + } + this.node.setSize([this.node.size[0], this.height + 300]); + this.updateData(); + this.refreshBackgroundImage(); + + }//end constructor + + updateData = () => { + if (!this.points || this.points.length === 0) { + console.log("no points"); + return; + } + const combinedPoints = { + positive: this.points, + negative: this.neg_points, + }; + this.pointsStoreWidget.value = JSON.stringify(combinedPoints); + this.pos_coordWidget.value = JSON.stringify(this.points); + this.neg_coordWidget.value = JSON.stringify(this.neg_points); + + if (this.bbox.length != 0) { + let bboxString = JSON.stringify(this.bbox); + this.bboxStoreWidget.value = bboxString; + this.bboxWidget.value = bboxString; + } + + this.vis.render(); + }; + + handleImageLoad = (img, file, base64String) => { + console.log(img.width, img.height); // Access width and height here + this.widthWidget.value = img.width; + this.heightWidget.value = img.height; + + if (img.width != this.vis.width() || img.height != this.vis.height()) { + if (img.width > 256) { + this.node.setSize([img.width + 45, this.node.size[1]]); + } + this.node.setSize([this.node.size[0], img.height + 300]); + this.vis.width(img.width); + this.vis.height(img.height); + this.height = img.height; + this.width = img.width; + this.updateData(); + } + this.backgroundImage.url(file ? 
URL.createObjectURL(file) : `data:${this.node.properties.imgData.type};base64,${base64String}`).visible(true).root.render(); + }; + + processImage = (img, file) => { + const canvas = document.createElement('canvas'); + const ctx = canvas.getContext('2d'); + + const maxWidth = 800; // maximum width + const maxHeight = 600; // maximum height + let width = img.width; + let height = img.height; + + // Calculate the new dimensions while preserving the aspect ratio + if (width > height) { + if (width > maxWidth) { + height *= maxWidth / width; + width = maxWidth; + } + } else { + if (height > maxHeight) { + width *= maxHeight / height; + height = maxHeight; + } + } + + canvas.width = width; + canvas.height = height; + ctx.drawImage(img, 0, 0, width, height); + + // Get the compressed image data as a Base64 string + const base64String = canvas.toDataURL('image/jpeg', 0.5).replace('data:', '').replace(/^.+,/, ''); // 0.5 is the quality from 0 to 1 + + this.node.properties.imgData = { + name: file.name, + lastModified: file.lastModified, + size: file.size, + type: file.type, + base64: base64String + }; + handleImageLoad(img, file, base64String); +}; + + handleImageFile = (file) => { + const reader = new FileReader(); + reader.onloadend = () => { + const img = new Image(); + img.src = reader.result; + img.onload = () => processImage(img, file); + }; + reader.readAsDataURL(file); + + const imageUrl = URL.createObjectURL(file); + const img = new Image(); + img.src = imageUrl; + img.onload = () => this.handleImageLoad(img, file, null); + }; + + refreshBackgroundImage = () => { + if (this.node.properties.imgData && this.node.properties.imgData.base64) { + const base64String = this.node.properties.imgData.base64; + const imageUrl = `data:${this.node.properties.imgData.type};base64,${base64String}`; + const img = new Image(); + img.src = imageUrl; + img.onload = () => this.handleImageLoad(img, null, base64String); + } + }; + + createContextMenu = () => { + self = this; + document.addEventListener('contextmenu', function (e) { + e.preventDefault(); + }); + + document.addEventListener('click', function (e) { + if (!self.node.contextMenu.contains(e.target)) { + self.node.contextMenu.style.display = 'none'; + } + }); + + this.node.menuItems.forEach((menuItem, index) => { + self = this; + menuItem.addEventListener('click', function (e) { + e.preventDefault(); + switch (index) { + case 0: + // Create file input element + const fileInput = document.createElement('input'); + fileInput.type = 'file'; + fileInput.accept = 'image/*'; // Accept only image files + + // Listen for file selection + fileInput.addEventListener('change', function (event) { + const file = event.target.files[0]; // Get the selected file + + if (file) { + const imageUrl = URL.createObjectURL(file); + let img = new Image(); + img.src = imageUrl; + img.onload = () => self.handleImageLoad(img, file, null); + } + }); + + fileInput.click(); + + self.node.contextMenu.style.display = 'none'; + break; + case 1: + self.backgroundImage.visible(false).root.render(); + self.node.properties.imgData = null; + self.node.contextMenu.style.display = 'none'; + break; + } + }); + }); + }//end createContextMenu +}//end class + + +//from melmass +export function hideWidgetForGood(node, widget, suffix = '') { + widget.origType = widget.type + widget.origComputeSize = widget.computeSize + widget.origSerializeValue = widget.serializeValue + widget.computeSize = () => [0, -4] // -4 is due to the gap litegraph adds between widgets automatically + widget.type = 
"converted-widget" + suffix + // widget.serializeValue = () => { + // // Prevent serializing the widget if we have no input linked + // const w = node.inputs?.find((i) => i.widget?.name === widget.name); + // if (w?.link == null) { + // return undefined; + // } + // return widget.origSerializeValue ? widget.origSerializeValue() : widget.value; + // }; + + // Hide any linked widgets, e.g. seed+seedControl + if (widget.linkedWidgets) { + for (const w of widget.linkedWidgets) { + hideWidgetForGood(node, w, ':' + widget.name) + } + } +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/web/js/setgetnodes.js b/custom_nodes/ComfyUI-KJNodes-main/web/js/setgetnodes.js new file mode 100644 index 0000000000000000000000000000000000000000..2a90dcc5895d83eb80939b5041e874405dc84c86 --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/web/js/setgetnodes.js @@ -0,0 +1,564 @@ +import { app } from "../../../scripts/app.js"; + +//based on diffus3's SetGet: https://github.com/diffus3/ComfyUI-extensions + +// Nodes that allow you to tunnel connections for cleaner graphs +function setColorAndBgColor(type) { + const colorMap = { + "MODEL": LGraphCanvas.node_colors.blue, + "LATENT": LGraphCanvas.node_colors.purple, + "VAE": LGraphCanvas.node_colors.red, + "CONDITIONING": LGraphCanvas.node_colors.brown, + "IMAGE": LGraphCanvas.node_colors.pale_blue, + "CLIP": LGraphCanvas.node_colors.yellow, + "FLOAT": LGraphCanvas.node_colors.green, + "MASK": { color: "#1c5715", bgcolor: "#1f401b"}, + "INT": { color: "#1b4669", bgcolor: "#29699c"}, + "CONTROL_NET": { color: "#156653", bgcolor: "#1c453b"}, + "NOISE": { color: "#2e2e2e", bgcolor: "#242121"}, + "GUIDER": { color: "#3c7878", bgcolor: "#1c453b"}, + "SAMPLER": { color: "#614a4a", bgcolor: "#3b2c2c"}, + "SIGMAS": { color: "#485248", bgcolor: "#272e27"}, + + }; + + const colors = colorMap[type]; + if (colors) { + this.color = colors.color; + this.bgcolor = colors.bgcolor; + } +} +let disablePrefix = app.ui.settings.getSettingValue("KJNodes.disablePrefix") +const LGraphNode = LiteGraph.LGraphNode + +function showAlert(message) { + app.extensionManager.toast.add({ + severity: 'warn', + summary: "KJ Get/Set", + detail: `${message}. Most likely you're missing custom nodes`, + life: 5000, + }) +} +app.registerExtension({ + name: "SetNode", + registerCustomNodes() { + class SetNode extends LGraphNode { + defaultVisibility = true; + serialize_widgets = true; + drawConnection = false; + currentGetters = null; + slotColor = "#FFF"; + canvas = app.canvas; + menuEntry = "Show connections"; + + constructor(title) { + super(title) + if (!this.properties) { + this.properties = { + "previousName": "" + }; + } + this.properties.showOutputText = SetNode.defaultVisibility; + + const node = this; + + this.addWidget( + "text", + "Constant", + '', + (s, t, u, v, x) => { + node.validateName(node.graph); + if(this.widgets[0].value !== ''){ + this.title = (!disablePrefix ? 
"Set_" : "") + this.widgets[0].value; + } + this.update(); + this.properties.previousName = this.widgets[0].value; + }, + {} + ) + + this.addInput("*", "*"); + this.addOutput("*", '*'); + + this.onConnectionsChange = function( + slotType, //1 = input, 2 = output + slot, + isChangeConnect, + link_info, + output + ) { + //On Disconnect + if (slotType == 1 && !isChangeConnect) { + if(this.inputs[slot].name === ''){ + this.inputs[slot].type = '*'; + this.inputs[slot].name = '*'; + this.title = "Set" + } + } + if (slotType == 2 && !isChangeConnect) { + this.outputs[slot].type = '*'; + this.outputs[slot].name = '*'; + + } + //On Connect + if (link_info && node.graph && slotType == 1 && isChangeConnect) { + const fromNode = node.graph._nodes.find((otherNode) => otherNode.id == link_info.origin_id); + + if (fromNode && fromNode.outputs && fromNode.outputs[link_info.origin_slot]) { + const type = fromNode.outputs[link_info.origin_slot].type; + + if (this.title === "Set"){ + this.title = (!disablePrefix ? "Set_" : "") + type; + } + if (this.widgets[0].value === '*'){ + this.widgets[0].value = type + } + + this.validateName(node.graph); + this.inputs[0].type = type; + this.inputs[0].name = type; + + if (app.ui.settings.getSettingValue("KJNodes.nodeAutoColor")){ + setColorAndBgColor.call(this, type); + } + } else { + showAlert("node input undefined.") + } + } + if (link_info && node.graph && slotType == 2 && isChangeConnect) { + const fromNode = node.graph._nodes.find((otherNode) => otherNode.id == link_info.origin_id); + + if (fromNode && fromNode.inputs && fromNode.inputs[link_info.origin_slot]) { + const type = fromNode.inputs[link_info.origin_slot].type; + + this.outputs[0].type = type; + this.outputs[0].name = type; + } else { + showAlert('node output undefined'); + } + } + + + //Update either way + this.update(); + } + + this.validateName = function(graph) { + let widgetValue = node.widgets[0].value; + + if (widgetValue !== '') { + let tries = 0; + const existingValues = new Set(); + + graph._nodes.forEach(otherNode => { + if (otherNode !== this && otherNode.type === 'SetNode') { + existingValues.add(otherNode.widgets[0].value); + } + }); + + while (existingValues.has(widgetValue)) { + widgetValue = node.widgets[0].value + "_" + tries; + tries++; + } + + node.widgets[0].value = widgetValue; + this.update(); + } + } + + this.clone = function () { + const cloned = SetNode.prototype.clone.apply(this); + cloned.inputs[0].name = '*'; + cloned.inputs[0].type = '*'; + cloned.value = ''; + cloned.properties.previousName = ''; + cloned.size = cloned.computeSize(); + return cloned; + }; + + this.onAdded = function(graph) { + this.validateName(graph); + } + + + this.update = function() { + if (!node.graph) { + return; + } + + const getters = this.findGetters(node.graph); + getters.forEach(getter => { + getter.setType(this.inputs[0].type); + }); + + if (this.widgets[0].value) { + const gettersWithPreviousName = this.findGetters(node.graph, true); + gettersWithPreviousName.forEach(getter => { + getter.setName(this.widgets[0].value); + }); + } + + const allGetters = node.graph._nodes.filter(otherNode => otherNode.type === "GetNode"); + allGetters.forEach(otherNode => { + if (otherNode.setComboValues) { + otherNode.setComboValues(); + } + }); + } + + + this.findGetters = function(graph, checkForPreviousName) { + const name = checkForPreviousName ? 
this.properties.previousName : this.widgets[0].value; + return graph._nodes.filter(otherNode => otherNode.type === 'GetNode' && otherNode.widgets[0].value === name && name !== ''); + } + + + // This node is purely frontend and does not impact the resulting prompt so should not be serialized + this.isVirtualNode = true; + } + + + onRemoved() { + const allGetters = this.graph._nodes.filter((otherNode) => otherNode.type == "GetNode"); + allGetters.forEach((otherNode) => { + if (otherNode.setComboValues) { + otherNode.setComboValues([this]); + } + }) + } + getExtraMenuOptions(_, options) { + this.menuEntry = this.drawConnection ? "Hide connections" : "Show connections"; + options.unshift( + { + content: this.menuEntry, + callback: () => { + this.currentGetters = this.findGetters(this.graph); + if (this.currentGetters.length == 0) return; + let linkType = (this.currentGetters[0].outputs[0].type); + this.slotColor = this.canvas.default_connection_color_byType[linkType] + this.menuEntry = this.drawConnection ? "Hide connections" : "Show connections"; + this.drawConnection = !this.drawConnection; + this.canvas.setDirty(true, true); + + }, + has_submenu: true, + submenu: { + title: "Color", + options: [ + { + content: "Highlight", + callback: () => { + this.slotColor = "orange" + this.canvas.setDirty(true, true); + } + } + ], + }, + }, + { + content: "Hide all connections", + callback: () => { + const allGetters = this.graph._nodes.filter(otherNode => otherNode.type === "GetNode" || otherNode.type === "SetNode"); + allGetters.forEach(otherNode => { + otherNode.drawConnection = false; + console.log(otherNode); + }); + + this.menuEntry = "Show connections"; + this.drawConnection = false + this.canvas.setDirty(true, true); + + }, + + }, + ); + // Dynamically add a submenu for all getters + this.currentGetters = this.findGetters(this.graph); + if (this.currentGetters) { + + let gettersSubmenu = this.currentGetters.map(getter => ({ + + content: `${getter.title} id: ${getter.id}`, + callback: () => { + this.canvas.centerOnNode(getter); + this.canvas.selectNode(getter, false); + this.canvas.setDirty(true, true); + + }, + })); + + options.unshift({ + content: "Getters", + has_submenu: true, + submenu: { + title: "GetNodes", + options: gettersSubmenu, + } + }); + } + } + + + onDrawForeground(ctx, lGraphCanvas) { + if (this.drawConnection) { + this._drawVirtualLinks(lGraphCanvas, ctx); + } + } + // onDrawCollapsed(ctx, lGraphCanvas) { + // if (this.drawConnection) { + // this._drawVirtualLinks(lGraphCanvas, ctx); + // } + // } + _drawVirtualLinks(lGraphCanvas, ctx) { + if (!this.currentGetters?.length) return; + var title = this.getTitle ? 
this.getTitle() : this.title; + var title_width = ctx.measureText(title).width; + if (!this.flags.collapsed) { + var start_node_slotpos = [ + this.size[0], + LiteGraph.NODE_TITLE_HEIGHT * 0.5, + ]; + } + else { + + var start_node_slotpos = [ + title_width + 55, + -15, + + ]; + } + // Provide a default link object with necessary properties, to avoid errors as link can't be null anymore + const defaultLink = { type: 'default', color: this.slotColor }; + + for (const getter of this.currentGetters) { + if (!this.flags.collapsed) { + var end_node_slotpos = this.getConnectionPos(false, 0); + end_node_slotpos = [ + getter.pos[0] - end_node_slotpos[0] + this.size[0], + getter.pos[1] - end_node_slotpos[1] + ]; + } + else { + var end_node_slotpos = this.getConnectionPos(false, 0); + end_node_slotpos = [ + getter.pos[0] - end_node_slotpos[0] + title_width + 50, + getter.pos[1] - end_node_slotpos[1] - 30 + ]; + } + lGraphCanvas.renderLink( + ctx, + start_node_slotpos, + end_node_slotpos, + defaultLink, + false, + null, + this.slotColor, + LiteGraph.RIGHT, + LiteGraph.LEFT + ); + } + } + } + + LiteGraph.registerNodeType( + "SetNode", + Object.assign(SetNode, { + title: "Set", + }) + ); + + SetNode.category = "KJNodes"; + }, +}); + +app.registerExtension({ + name: "GetNode", + registerCustomNodes() { + class GetNode extends LGraphNode { + + defaultVisibility = true; + serialize_widgets = true; + drawConnection = false; + slotColor = "#FFF"; + currentSetter = null; + canvas = app.canvas; + + constructor(title) { + super(title) + if (!this.properties) { + this.properties = {}; + } + this.properties.showOutputText = GetNode.defaultVisibility; + const node = this; + this.addWidget( + "combo", + "Constant", + "", + (e) => { + this.onRename(); + }, + { + values: () => { + const setterNodes = node.graph._nodes.filter((otherNode) => otherNode.type == 'SetNode'); + return setterNodes.map((otherNode) => otherNode.widgets[0].value).sort(); + } + } + ) + + this.addOutput("*", '*'); + this.onConnectionsChange = function( + slotType, //0 = output, 1 = input + slot, //self-explanatory + isChangeConnect, + link_info, + output + ) { + this.validateLinks(); + } + + this.setName = function(name) { + node.widgets[0].value = name; + node.onRename(); + node.serialize(); + } + + this.onRename = function() { + const setter = this.findSetter(node.graph); + if (setter) { + let linkType = (setter.inputs[0].type); + + this.setType(linkType); + this.title = (!disablePrefix ? 
"Get_" : "") + setter.widgets[0].value; + + if (app.ui.settings.getSettingValue("KJNodes.nodeAutoColor")){ + setColorAndBgColor.call(this, linkType); + } + + } else { + this.setType('*'); + } + } + + this.clone = function () { + const cloned = GetNode.prototype.clone.apply(this); + cloned.size = cloned.computeSize(); + return cloned; + }; + + this.validateLinks = function() { + if (this.outputs[0].type !== '*' && this.outputs[0].links) { + this.outputs[0].links.filter(linkId => { + const link = node.graph.links[linkId]; + return link && (!link.type.split(",").includes(this.outputs[0].type) && link.type !== '*'); + }).forEach(linkId => { + node.graph.removeLink(linkId); + }); + } + }; + + this.setType = function(type) { + this.outputs[0].name = type; + this.outputs[0].type = type; + this.validateLinks(); + } + + this.findSetter = function(graph) { + const name = this.widgets[0].value; + const foundNode = graph._nodes.find(otherNode => otherNode.type === 'SetNode' && otherNode.widgets[0].value === name && name !== ''); + return foundNode; + }; + + this.goToSetter = function() { + const setter = this.findSetter(this.graph); + this.canvas.centerOnNode(setter); + this.canvas.selectNode(setter, false); + }; + + // This node is purely frontend and does not impact the resulting prompt so should not be serialized + this.isVirtualNode = true; + } + + getInputLink(slot) { + const setter = this.findSetter(this.graph); + + if (setter) { + const slotInfo = setter.inputs[slot]; + const link = this.graph.links[slotInfo.link]; + return link; + } else { + const errorMessage = "No SetNode found for " + this.widgets[0].value + "(" + this.type + ")"; + showAlert(errorMessage); + //throw new Error(errorMessage); + } + } + onAdded(graph) { + } + getExtraMenuOptions(_, options) { + let menuEntry = this.drawConnection ? "Hide connections" : "Show connections"; + + options.unshift( + { + content: "Go to setter", + callback: () => { + this.goToSetter(); + }, + }, + { + content: menuEntry, + callback: () => { + this.currentSetter = this.findSetter(this.graph); + if (this.currentSetter.length == 0) return; + let linkType = (this.currentSetter.inputs[0].type); + this.drawConnection = !this.drawConnection; + this.slotColor = this.canvas.default_connection_color_byType[linkType] + menuEntry = this.drawConnection ? 
"Hide connections" : "Show connections"; + this.canvas.setDirty(true, true); + }, + }, + ); + } + + onDrawForeground(ctx, lGraphCanvas) { + if (this.drawConnection) { + this._drawVirtualLink(lGraphCanvas, ctx); + } + } + // onDrawCollapsed(ctx, lGraphCanvas) { + // if (this.drawConnection) { + // this._drawVirtualLink(lGraphCanvas, ctx); + // } + // } + _drawVirtualLink(lGraphCanvas, ctx) { + if (!this.currentSetter) return; + + // Provide a default link object with necessary properties, to avoid errors as link can't be null anymore + const defaultLink = { type: 'default', color: this.slotColor }; + + let start_node_slotpos = this.currentSetter.getConnectionPos(false, 0); + start_node_slotpos = [ + start_node_slotpos[0] - this.pos[0], + start_node_slotpos[1] - this.pos[1], + ]; + let end_node_slotpos = [0, -LiteGraph.NODE_TITLE_HEIGHT * 0.5]; + lGraphCanvas.renderLink( + ctx, + start_node_slotpos, + end_node_slotpos, + defaultLink, + false, + null, + this.slotColor + ); + } + } + + LiteGraph.registerNodeType( + "GetNode", + Object.assign(GetNode, { + title: "Get", + }) + ); + + GetNode.category = "KJNodes"; + }, +}); diff --git a/custom_nodes/ComfyUI-KJNodes-main/web/js/spline_editor.js b/custom_nodes/ComfyUI-KJNodes-main/web/js/spline_editor.js new file mode 100644 index 0000000000000000000000000000000000000000..a8085b9a7f40e1bfbf826b04c5b280180e5ab72d --- /dev/null +++ b/custom_nodes/ComfyUI-KJNodes-main/web/js/spline_editor.js @@ -0,0 +1,866 @@ +import { app } from '../../../scripts/app.js' + +//from melmass +export function makeUUID() { + let dt = new Date().getTime() + const uuid = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => { + const r = ((dt + Math.random() * 16) % 16) | 0 + dt = Math.floor(dt / 16) + return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16) + }) + return uuid +} + +export const loadScript = ( + FILE_URL, + async = true, + type = 'text/javascript', + ) => { + return new Promise((resolve, reject) => { + try { + // Check if the script already exists + const existingScript = document.querySelector(`script[src="${FILE_URL}"]`) + if (existingScript) { + resolve({ status: true, message: 'Script already loaded' }) + return + } + + const scriptEle = document.createElement('script') + scriptEle.type = type + scriptEle.async = async + scriptEle.src = FILE_URL + + scriptEle.addEventListener('load', (ev) => { + resolve({ status: true }) + }) + + scriptEle.addEventListener('error', (ev) => { + reject({ + status: false, + message: `Failed to load the script ${FILE_URL}`, + }) + }) + + document.body.appendChild(scriptEle) + } catch (error) { + reject(error) + } + }) + } + const create_documentation_stylesheet = () => { + const tag = 'kj-splineditor-stylesheet' + + let styleTag = document.head.querySelector(tag) + + if (!styleTag) { + styleTag = document.createElement('style') + styleTag.type = 'text/css' + styleTag.id = tag + styleTag.innerHTML = ` + .spline-editor { + + position: absolute; + + font: 12px monospace; + line-height: 1.5em; + padding: 10px; + z-index: 0; + overflow: hidden; + } + ` + document.head.appendChild(styleTag) + } + } + +loadScript('/kjweb_async/svg-path-properties.min.js').catch((e) => { + console.log(e) +}) +loadScript('/kjweb_async/protovis.min.js').catch((e) => { + console.log(e) +}) +create_documentation_stylesheet() + +function chainCallback(object, property, callback) { + if (object == undefined) { + //This should not happen. 
+ console.error("Tried to add callback to non-existant object") + return; + } + if (property in object) { + const callback_orig = object[property] + object[property] = function () { + const r = callback_orig.apply(this, arguments); + callback.apply(this, arguments); + return r + }; + } else { + object[property] = callback; + } +} +app.registerExtension({ + name: 'KJNodes.SplineEditor', + + async beforeRegisterNodeDef(nodeType, nodeData) { + if (nodeData?.name === 'SplineEditor') { + chainCallback(nodeType.prototype, "onNodeCreated", function () { + + hideWidgetForGood(this, this.widgets.find(w => w.name === "coordinates")) + + var element = document.createElement("div"); + this.uuid = makeUUID() + element.id = `spline-editor-${this.uuid}` + + // fake image widget to allow copy/paste + const fakeimagewidget = this.addWidget("COMBO", "image", null, () => { }, {}); + hideWidgetForGood(this, fakeimagewidget) + + this.splineEditor = this.addDOMWidget(nodeData.name, "SplineEditorWidget", element, { + serialize: false, + hideOnZoom: false, + }); + + // context menu + this.contextMenu = document.createElement("div"); + this.contextMenu.className = 'spline-editor-context-menu'; + this.contextMenu.id = "context-menu"; + this.contextMenu.style.display = "none"; + this.contextMenu.style.position = "absolute"; + this.contextMenu.style.backgroundColor = "#202020"; + this.contextMenu.style.minWidth = "100px"; + this.contextMenu.style.boxShadow = "0px 8px 16px 0px rgba(0,0,0,0.2)"; + this.contextMenu.style.zIndex = "100"; + this.contextMenu.style.padding = "5px"; + + function styleMenuItem(menuItem) { + menuItem.style.display = "block"; + menuItem.style.padding = "5px"; + menuItem.style.color = "#FFF"; + menuItem.style.fontFamily = "Arial, sans-serif"; + menuItem.style.fontSize = "16px"; + menuItem.style.textDecoration = "none"; + menuItem.style.marginBottom = "5px"; + } + function createMenuItem(id, textContent) { + let menuItem = document.createElement("a"); + menuItem.href = "#"; + menuItem.id = `menu-item-${id}`; + menuItem.textContent = textContent; + styleMenuItem(menuItem); + return menuItem; + } + + // Create an array of menu items using the createMenuItem function + this.menuItems = [ + createMenuItem(0, "Toggle handles"), + createMenuItem(1, "Display sample points"), + createMenuItem(2, "Switch point shape"), + createMenuItem(3, "Background image"), + createMenuItem(4, "Invert point order"), + createMenuItem(5, "Clear Image"), + ]; + + // Add mouseover and mouseout event listeners to each menu item for styling + this.menuItems.forEach(menuItem => { + menuItem.addEventListener('mouseover', function() { + this.style.backgroundColor = "gray"; + }); + + menuItem.addEventListener('mouseout', function() { + this.style.backgroundColor = "#202020"; + }); + }); + + // Append each menu item to the context menu + this.menuItems.forEach(menuItem => { + this.contextMenu.appendChild(menuItem); + }); + + document.body.appendChild(this.contextMenu); + + this.addWidget("button", "New spline", null, () => { + if (!this.properties || !("points" in this.properties)) { + this.editor = new SplineEditor(this); + this.addProperty("points", this.constructor.type, "string"); + } + else { + this.editor = new SplineEditor(this, true); + } + }); + + this.setSize([550, 950]); + this.resizable = false; + this.splineEditor.parentEl = document.createElement("div"); + this.splineEditor.parentEl.className = "spline-editor"; + this.splineEditor.parentEl.id = `spline-editor-${this.uuid}` + 
element.appendChild(this.splineEditor.parentEl); + + chainCallback(this, "onConfigure", function () { + try { + this.editor = new SplineEditor(this); + } catch (error) { + console.error("An error occurred while configuring the editor:", error); + } + }); + chainCallback(this, "onExecuted", function (message) { + let bg_image = message["bg_image"]; + this.properties.imgData = { + name: "bg_image", + base64: bg_image + }; + this.editor.refreshBackgroundImage(this); + }); + + }); // onAfterGraphConfigured + }//node created + } //before register +})//register + + +class SplineEditor{ + constructor(context, reset = false) { + this.node = context; + this.reset=reset; + const self = this; + console.log("creatingSplineEditor") + + this.node.pasteFile = (file) => { + if (file.type.startsWith("image/")) { + this.handleImageFile(file); + return true; + } + return false; + }; + + this.node.onDragOver = function (e) { + if (e.dataTransfer && e.dataTransfer.items) { + return [...e.dataTransfer.items].some(f => f.kind === "file" && f.type.startsWith("image/")); + } + return false; + }; + + // On drop upload files + this.node.onDragDrop = (e) => { + console.log("onDragDrop called"); + let handled = false; + for (const file of e.dataTransfer.files) { + if (file.type.startsWith("image/")) { + this.handleImageFile(file); + handled = true; + } + } + return handled; + }; + + // context menu + this.createContextMenu(); + + + this.dotShape = "circle"; + this.drawSamplePoints = false; + + if (reset && context.splineEditor.element) { + context.splineEditor.element.innerHTML = ''; // Clear the container + } + this.coordWidget = context.widgets.find(w => w.name === "coordinates"); + this.interpolationWidget = context.widgets.find(w => w.name === "interpolation"); + this.pointsWidget = context.widgets.find(w => w.name === "points_to_sample"); + this.pointsStoreWidget = context.widgets.find(w => w.name === "points_store"); + this.tensionWidget = context.widgets.find(w => w.name === "tension"); + this.minValueWidget = context.widgets.find(w => w.name === "min_value"); + this.maxValueWidget = context.widgets.find(w => w.name === "max_value"); + this.samplingMethodWidget = context.widgets.find(w => w.name === "sampling_method"); + this.widthWidget = context.widgets.find(w => w.name === "mask_width"); + this.heightWidget = context.widgets.find(w => w.name === "mask_height"); + + this.interpolation = this.interpolationWidget.value + this.tension = this.tensionWidget.value + this.points_to_sample = this.pointsWidget.value + this.rangeMin = this.minValueWidget.value + this.rangeMax = this.maxValueWidget.value + this.pointsLayer = null; + this.samplingMethod = this.samplingMethodWidget.value + + if (this.samplingMethod == "path") { + this.dotShape = "triangle" + } + + + this.interpolationWidget.callback = () => { + this.interpolation = this.interpolationWidget.value + this.updatePath(); + } + this.samplingMethodWidget.callback = () => { + this.samplingMethod = this.samplingMethodWidget.value + if (this.samplingMethod == "path") { + this.dotShape = "triangle" + } + else if (this.samplingMethod == "controlpoints") { + this.dotShape = "circle" + this.drawSamplePoints = true; + } + this.updatePath(); + } + this.tensionWidget.callback = () => { + this.tension = this.tensionWidget.value + this.updatePath(); + } + this.pointsWidget.callback = () => { + this.points_to_sample = this.pointsWidget.value + this.updatePath(); + } + this.minValueWidget.callback = () => { + this.rangeMin = this.minValueWidget.value + this.updatePath(); + } 
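+    // Each of these widget callbacks funnels into updatePath(), which re-samples the
+    // spline and rewrites the stored coordinates JSON. As the hover label further down
+    // shows, a point's y pixel maps into the min/max range as
+    // (1 - y / height) * (max - min) + min; e.g. y = 128 on a 512 px high canvas with
+    // a 0–1 range reads as 0.75.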
+ this.maxValueWidget.callback = () => { + this.rangeMax = this.maxValueWidget.value + this.updatePath(); + } + this.widthWidget.callback = () => { + this.width = this.widthWidget.value; + if (this.width > 256) { + context.setSize([this.width + 45, context.size[1]]); + } + this.vis.width(this.width); + this.updatePath(); +} +this.heightWidget.callback = () => { + this.height = this.heightWidget.value + this.vis.height(this.height) + context.setSize([context.size[0], this.height + 430]); + this.updatePath(); + } + this.pointsStoreWidget.callback = () => { + points = JSON.parse(this.pointsStoreWidget.value); + this.updatePath(); + } + + // Initialize or reset points array + this.drawHandles = false; + this.drawRuler = true; + var hoverIndex = -1; + var isDragging = false; + this.width = this.widthWidget.value; + this.height = this.heightWidget.value; + var i = 3; + this.points = []; + + if (!reset && this.pointsStoreWidget.value != "") { + this.points = JSON.parse(this.pointsStoreWidget.value); + } else { + this.points = pv.range(1, 4).map((i, index) => { + if (index === 0) { + // First point at the bottom-left corner + return { x: 0, y: this.height }; + } else if (index === 2) { + // Last point at the top-right corner + return { x: this.width, y: 0 }; + } else { + // Other points remain as they were + return { + x: i * this.width / 5, + y: 50 + Math.random() * (this.height - 100) + }; + } + }); + this.pointsStoreWidget.value = JSON.stringify(this.points); + } + + this.vis = new pv.Panel() + .width(this.width) + .height(this.height) + .fillStyle("#222") + .strokeStyle("gray") + .lineWidth(2) + .antialias(false) + .margin(10) + .event("mousedown", function () { + if (pv.event.shiftKey) { // Use pv.event to access the event object + let scaledMouse = { + x: this.mouse().x / app.canvas.ds.scale, + y: this.mouse().y / app.canvas.ds.scale + }; + i = self.points.push(scaledMouse) - 1; + self.updatePath(); + return this; + } + else if (pv.event.ctrlKey) { + // Capture the clicked location + let clickedPoint = { + x: this.mouse().x / app.canvas.ds.scale, + y: this.mouse().y / app.canvas.ds.scale + }; + + // Find the two closest points to the clicked location + let { point1Index, point2Index } = self.findClosestPoints(self.points, clickedPoint); + + // Calculate the midpoint between the two closest points + let midpoint = { + x: (self.points[point1Index].x + self.points[point2Index].x) / 2, + y: (self.points[point1Index].y + self.points[point2Index].y) / 2 + }; + + // Insert the midpoint into the array + self.points.splice(point2Index, 0, midpoint); + i = point2Index; + self.updatePath(); + } + else if (pv.event.button === 2) { + self.node.contextMenu.style.display = 'block'; + self.node.contextMenu.style.left = `${pv.event.clientX}px`; + self.node.contextMenu.style.top = `${pv.event.clientY}px`; + } + }) + this.backgroundImage = this.vis.add(pv.Image).visible(false) + + this.vis.add(pv.Rule) + .data(pv.range(0, this.height, 64)) + .bottom(d => d) + .strokeStyle("gray") + .lineWidth(3) + .visible(() => self.drawRuler) + + // vis.add(pv.Rule) + // .data(pv.range(0, points_to_sample, 1)) + // .left(d => d * 512 / (points_to_sample - 1)) + // .strokeStyle("gray") + // .lineWidth(2) + + this.vis.add(pv.Line) + .data(() => this.points) + .left(d => d.x) + .top(d => d.y) + .interpolate(() => this.interpolation) + .tension(() => this.tension) + .segmented(() => false) + .strokeStyle(pv.Colors.category10().by(pv.index)) + .lineWidth(3) + + this.vis.add(pv.Dot) + .data(() => this.points) + .left(d => d.x) + 
.top(d => d.y) + .radius(10) + .shape(function() { + return self.dotShape; + }) + .angle(function() { + const index = this.index; + let angle = 0; + + if (self.dotShape === "triangle") { + let dxNext = 0, dyNext = 0; + if (index < self.points.length - 1) { + dxNext = self.points[index + 1].x - self.points[index].x; + dyNext = self.points[index + 1].y - self.points[index].y; + } + + let dxPrev = 0, dyPrev = 0; + if (index > 0) { + dxPrev = self.points[index].x - self.points[index - 1].x; + dyPrev = self.points[index].y - self.points[index - 1].y; + } + + const dx = (dxNext + dxPrev) / 2; + const dy = (dyNext + dyPrev) / 2; + + angle = Math.atan2(dy, dx); + angle -= Math.PI / 2; + angle = (angle + 2 * Math.PI) % (2 * Math.PI); + } + + return angle; + }) + .cursor("move") + .strokeStyle(function () { return i == this.index ? "#ff7f0e" : "#1f77b4"; }) + .fillStyle(function () { return "rgba(100, 100, 100, 0.3)"; }) + .event("mousedown", pv.Behavior.drag()) + .event("dragstart", function () { + i = this.index; + hoverIndex = this.index; + isDragging = true; + if (pv.event.button === 2 && i !== 0 && i !== self.points.length - 1) { + self.points.splice(i--, 1); + self.vis.render(); + } + return this; + }) + .event("dragend", function() { + if (this.pathElements !== null) { + self.updatePath(); + } + isDragging = false; + }) + .event("drag", function () { + let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new X position by the inverse of the scale factor + let adjustedY = this.mouse().y / app.canvas.ds.scale; // Adjust the new Y position by the inverse of the scale factor + // Determine the bounds of the vis.Panel + const panelWidth = self.vis.width(); + const panelHeight = self.vis.height(); + + // Adjust the new position if it would place the dot outside the bounds of the vis.Panel + adjustedX = Math.max(0, Math.min(panelWidth, adjustedX)); + adjustedY = Math.max(0, Math.min(panelHeight, adjustedY)); + self.points[this.index] = { x: adjustedX, y: adjustedY }; // Update the point's position + self.vis.render(); // Re-render the visualization to reflect the new position + }) + .event("mouseover", function() { + hoverIndex = this.index; // Set the hover index to the index of the hovered dot + self.vis.render(); // Re-render the visualization + }) + .event("mouseout", function() { + !isDragging && (hoverIndex = -1); // Reset the hover index when the mouse leaves the dot + self.vis.render(); // Re-render the visualization + }) + .anchor("center") + .add(pv.Label) + .visible(function() { + return hoverIndex === this.index; // Only show the label for the hovered dot + }) + .left(d => d.x < this.width / 2 ? d.x + 80 : d.x - 70) // Shift label to right if on left half, otherwise shift to left + .top(d => d.y < this.height / 2 ? 
d.y + 20 : d.y - 20) // Shift label down if on top half, otherwise shift up + .font(12 + "px sans-serif") + .text(d => { + if (this.samplingMethod == "path") { + return `X: ${Math.round(d.x)}, Y: ${Math.round(d.y)}`; + } else { + let frame = Math.round((d.x / self.width) * self.points_to_sample); + let normalizedY = (1.0 - (d.y / self.height) - 0.0) * (self.rangeMax - self.rangeMin) + self.rangeMin; + let normalizedX = (d.x / self.width); + return `F: ${frame}, X: ${normalizedX.toFixed(2)}, Y: ${normalizedY.toFixed(2)}`; + } + }) + .textStyle("orange") + + if (this.points.length != 0) { + this.vis.render(); + } + var svgElement = this.vis.canvas(); + svgElement.style['zIndex'] = "2" + svgElement.style['position'] = "relative" + this.node.splineEditor.element.appendChild(svgElement); + this.pathElements = svgElement.getElementsByTagName('path'); // Get all path elements + + if (this.width > 256) { + this.node.setSize([this.width + 45, this.node.size[1]]); + } + this.node.setSize([this.node.size[0], this.height + 430]); + this.updatePath(); + this.refreshBackgroundImage(); +} + + updatePath = () => { + if (!this.points || this.points.length === 0) { + console.log("no points"); + return; + } + if (this.samplingMethod != "controlpoints") { + var coords = this.samplePoints(this.pathElements[0], this.points_to_sample, this.samplingMethod, this.width); + } + else { + var coords = this.points + } + + if (this.drawSamplePoints) { + if (this.pointsLayer) { + // Update the data of the existing points layer + this.pointsLayer.data(coords); + } else { + // Create the points layer if it doesn't exist + this.pointsLayer = this.vis.add(pv.Dot) + .data(coords) + .left(function(d) { return d.x; }) + .top(function(d) { return d.y; }) + .radius(5) // Adjust the radius as needed + .fillStyle("red") // Change the color as needed + .strokeStyle("black") // Change the stroke color as needed + .lineWidth(1); // Adjust the line width as needed + } + } else { + if (this.pointsLayer) { + // Remove the points layer + this.pointsLayer.data([]); + this.vis.render(); + } + } + let coordsString = JSON.stringify(coords); + this.pointsStoreWidget.value = JSON.stringify(this.points); + if (this.coordWidget) { + this.coordWidget.value = coordsString; + } + this.vis.render(); + }; + handleImageLoad = (img, file, base64String) => { + console.log(img.width, img.height); // Access width and height here + this.widthWidget.value = img.width; + this.heightWidget.value = img.height; + this.drawRuler = false; + + if (img.width != this.vis.width() || img.height != this.vis.height()) { + if (img.width > 256) { + this.node.setSize([img.width + 45, this.node.size[1]]); + } + this.node.setSize([this.node.size[0], img.height + 500]); + this.vis.width(img.width); + this.vis.height(img.height); + this.height = img.height; + this.width = img.width; + + this.updatePath(); + } + this.backgroundImage.url(file ? 
URL.createObjectURL(file) : `data:${this.node.properties.imgData.type};base64,${base64String}`).visible(true).root.render(); + }; + + processImage = (img, file) => { + const canvas = document.createElement('canvas'); + const ctx = canvas.getContext('2d'); + + const maxWidth = 800; // maximum width + const maxHeight = 600; // maximum height + let width = img.width; + let height = img.height; + + // Calculate the new dimensions while preserving the aspect ratio + if (width > height) { + if (width > maxWidth) { + height *= maxWidth / width; + width = maxWidth; + } + } else { + if (height > maxHeight) { + width *= maxHeight / height; + height = maxHeight; + } + } + + canvas.width = width; + canvas.height = height; + ctx.drawImage(img, 0, 0, width, height); + + // Get the compressed image data as a Base64 string + const base64String = canvas.toDataURL('image/jpeg', 0.5).replace('data:', '').replace(/^.+,/, ''); // 0.5 is the quality from 0 to 1 + + this.node.properties.imgData = { + name: file.name, + lastModified: file.lastModified, + size: file.size, + type: file.type, + base64: base64String + }; + handleImageLoad(img, file, base64String); + }; + + handleImageFile = (file) => { + const reader = new FileReader(); + reader.onloadend = () => { + const img = new Image(); + img.src = reader.result; + img.onload = () => processImage(img, file); + }; + reader.readAsDataURL(file); + + const imageUrl = URL.createObjectURL(file); + const img = new Image(); + img.src = imageUrl; + img.onload = () => this.handleImageLoad(img, file, null); + }; + + refreshBackgroundImage = () => { + if (this.node.properties.imgData && this.node.properties.imgData.base64) { + const base64String = this.node.properties.imgData.base64; + const imageUrl = `data:${this.node.properties.imgData.type};base64,${base64String}`; + const img = new Image(); + img.src = imageUrl; + img.onload = () => this.handleImageLoad(img, null, base64String); + } + }; + + createContextMenu = () => { + self = this; + document.addEventListener('contextmenu', function (e) { + e.preventDefault(); + + }); + + document.addEventListener('click', function (e) { + document.querySelectorAll('.spline-editor-context-menu').forEach(menu => { + menu.style.display = 'none'; + }); + }); + + this.node.menuItems.forEach((menuItem, index) => { + self = this; + menuItem.addEventListener('click', function (e) { + e.preventDefault(); + switch (index) { + case 0: + e.preventDefault(); + if (!self.drawHandles) { + self.drawHandles = true + self.vis.add(pv.Line) + .data(() => self.points.map((point, index) => ({ + start: point, + end: [index] + }))) + .left(d => d.start.x) + .top(d => d.start.y) + .interpolate("linear") + .tension(0) // Straight lines + .strokeStyle("#ff7f0e") // Same color as control points + .lineWidth(1) + .visible(() => self.drawHandles); + self.vis.render(); + } else { + self.drawHandles = false + self.vis.render(); + } + self.node.contextMenu.style.display = 'none'; + break; + case 1: + e.preventDefault(); + self.drawSamplePoints = !self.drawSamplePoints; + self.updatePath(); + break; + case 2: + e.preventDefault(); + if (self.dotShape == "circle"){ + self.dotShape = "triangle" + } + else { + self.dotShape = "circle" + } + console.log(self.dotShape) + self.updatePath(); + break; + case 3: + // Create file input element + const fileInput = document.createElement('input'); + fileInput.type = 'file'; + fileInput.accept = 'image/*'; // Accept only image files + + // Listen for file selection + fileInput.addEventListener('change', function (event) { + 
const file = event.target.files[0]; // Get the selected file + + if (file) { + const imageUrl = URL.createObjectURL(file); + let img = new Image(); + img.src = imageUrl; + img.onload = () => self.handleImageLoad(img, file, null); + } + }); + + fileInput.click(); + + self.node.contextMenu.style.display = 'none'; + break; + case 4: + e.preventDefault(); + self.points.reverse(); + self.updatePath(); + break; + case 5: + self.backgroundImage.visible(false).root.render(); + self.node.properties.imgData = null; + self.node.contextMenu.style.display = 'none'; + break; + } + }); + }); + } + + samplePoints(svgPathElement, numSamples, samplingMethod, width) { + var svgWidth = width; // Fixed width of the SVG element + var pathLength = svgPathElement.getTotalLength(); + var points = []; + + for (var i = 0; i < numSamples; i++) { + if (samplingMethod === "time") { + // Calculate the x-coordinate for the current sample based on the SVG's width + var x = (svgWidth / (numSamples - 1)) * i; + // Find the point on the path that intersects the vertical line at the calculated x-coordinate + var point = this.findPointAtX(svgPathElement, x, pathLength); + } + else if (samplingMethod === "path") { + // Calculate the distance along the path for the current sample + var distance = (pathLength / (numSamples - 1)) * i; + // Get the point at the current distance + var point = svgPathElement.getPointAtLength(distance); + } + + // Add the point to the array of points + points.push({ x: point.x, y: point.y }); + } + return points; + } + + findClosestPoints(points, clickedPoint) { + // Calculate distances from clickedPoint to each point in the array + let distances = points.map(point => { + let dx = clickedPoint.x - point.x; + let dy = clickedPoint.y - point.y; + return { index: points.indexOf(point), distance: Math.sqrt(dx * dx + dy * dy) }; + }); + // Sort distances and get the indices of the two closest points + let sortedDistances = distances.sort((a, b) => a.distance - b.distance); + let closestPoint1Index = sortedDistances[0].index; + let closestPoint2Index = sortedDistances[1].index; + // Ensure point1Index is always the smaller index + if (closestPoint1Index > closestPoint2Index) { + [closestPoint1Index, closestPoint2Index] = [closestPoint2Index, closestPoint1Index]; + } + return { point1Index: closestPoint1Index, point2Index: closestPoint2Index }; + } + + findPointAtX(svgPathElement, targetX, pathLength) { + let low = 0; + let high = pathLength; + let bestPoint = svgPathElement.getPointAtLength(0); + + while (low <= high) { + let mid = low + (high - low) / 2; + let point = svgPathElement.getPointAtLength(mid); + + if (Math.abs(point.x - targetX) < 1) { + return point; // The point is close enough to the target + } + + if (point.x < targetX) { + low = mid + 1; + } else { + high = mid - 1; + } + + // Keep track of the closest point found so far + if (Math.abs(point.x - targetX) < Math.abs(bestPoint.x - targetX)) { + bestPoint = point; + } + } + + // Return the closest point found + return bestPoint; + } +} +//from melmass +export function hideWidgetForGood(node, widget, suffix = '') { + widget.origType = widget.type + widget.origComputeSize = widget.computeSize + widget.origSerializeValue = widget.serializeValue + widget.computeSize = () => [0, -4] // -4 is due to the gap litegraph adds between widgets automatically + widget.type = "converted-widget" + suffix + // widget.serializeValue = () => { + // // Prevent serializing the widget if we have no input linked + // const w = node.inputs?.find((i) => 
i.widget?.name === widget.name); + // if (w?.link == null) { + // return undefined; + // } + // return widget.origSerializeValue ? widget.origSerializeValue() : widget.value; + // }; + + // Hide any linked widgets, e.g. seed+seedControl + if (widget.linkedWidgets) { + for (const w of widget.linkedWidgets) { + hideWidgetForGood(node, w, ':' + widget.name) + } + } +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-KJNodes-main/web/red.png b/custom_nodes/ComfyUI-KJNodes-main/web/red.png new file mode 100644 index 0000000000000000000000000000000000000000..4352c118b2c5fa6f33edc4d99a5e4d22649ff827 Binary files /dev/null and b/custom_nodes/ComfyUI-KJNodes-main/web/red.png differ diff --git a/custom_nodes/ComfyUI-essentials-main/.gitignore b/custom_nodes/ComfyUI-essentials-main/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b5a4568fd5c499d1b66b0b066ae73a2f4ea1f4c7 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/.gitignore @@ -0,0 +1,6 @@ +/__pycache__/ +/luts/*.cube +/luts/*.CUBE +/fonts/*.ttf +/fonts/*.otf +!/fonts/ShareTechMono-Regular.ttf \ No newline at end of file diff --git a/custom_nodes/ComfyUI-essentials-main/LICENSE b/custom_nodes/ComfyUI-essentials-main/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..948b5e4192d70b665b15bb5a917bd98b3771eb4b --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Matteo Spinelli + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/custom_nodes/ComfyUI-essentials-main/README.md b/custom_nodes/ComfyUI-essentials-main/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cc1871fcffb2908c865c4d3c03d226d0954ef41a --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/README.md @@ -0,0 +1,49 @@ +# :wrench: ComfyUI Essentials + +Essential nodes that are weirdly missing from ComfyUI core. With few exceptions they are new features and not commodities. I hope this will be just a temporary repository until the nodes get included into ComfyUI. + +# Sponsorship + +
    + +**[:heart: Github Sponsor](https://github.com/sponsors/cubiq) | [:coin: Paypal](https://paypal.me/matt3o)** + +
    + +If you like my work and wish to see updates and new features please consider sponsoring my projects. + +- [ComfyUI IPAdapter Plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus) +- [ComfyUI InstantID (Native)](https://github.com/cubiq/ComfyUI_InstantID) +- [ComfyUI Essentials](https://github.com/cubiq/ComfyUI_essentials) +- [ComfyUI FaceAnalysis](https://github.com/cubiq/ComfyUI_FaceAnalysis) + +Not to mention the documentation and videos tutorials. Check my **ComfyUI Advanced Understanding** videos on YouTube for example, [part 1](https://www.youtube.com/watch?v=_C7kR2TFIX0) and [part 2](https://www.youtube.com/watch?v=ijqXnW_9gzc) + +The only way to keep the code open and free is by sponsoring its development. The more sponsorships the more time I can dedicate to my open source projects. + +Please consider a [Github Sponsorship](https://github.com/sponsors/cubiq) or [PayPal donation](https://paypal.me/matt3o) (Matteo "matt3o" Spinelli). For sponsorships of $50+, let me know if you'd like to be mentioned in this readme file, you can find me on [Discord](https://latent.vision/discord) or _matt3o :snail: gmail.com_. + +## Current sponsors + +It's only thanks to generous sponsors that **the whole community** can enjoy open and free software. Please join me in thanking the following companies and individuals! + +### :trophy: Gold sponsors + +[![Kaiber.ai](https://f.latent.vision/imgs/kaiber.png)](https://kaiber.ai/)   [![InstaSD](https://f.latent.vision/imgs/instasd.png)](https://www.instasd.com/) + +### :tada: Silver sponsors + +[![OperArt.ai](https://f.latent.vision/imgs/openart.png?r=1)](https://openart.ai/workflows)   [![Finetuners](https://f.latent.vision/imgs/finetuners.png)](https://www.finetuners.ai/)   [![Comfy.ICU](https://f.latent.vision/imgs/comfyicu.png?r=1)](https://comfy.icu/) + +### Other companies supporting my projects + +- [RunComfy](https://www.runcomfy.com/) (ComfyUI Cloud) + +### Esteemed individuals + +- [Øystein Ø. 
Olsen](https://github.com/FireNeslo) +- [Jack Gane](https://github.com/ganeJackS) +- [Nathan Shipley](https://www.nathanshipley.com/) +- [Dkdnzia](https://github.com/Dkdnzia) + +[And all my public and private sponsors!](https://github.com/sponsors/cubiq) \ No newline at end of file diff --git a/custom_nodes/ComfyUI-essentials-main/__init__.py b/custom_nodes/ComfyUI-essentials-main/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c3008a91c230ea50a2a8bc013f2e60cc7babc77c --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/__init__.py @@ -0,0 +1,36 @@ +#from .essentials import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS +from .image import IMAGE_CLASS_MAPPINGS, IMAGE_NAME_MAPPINGS +from .mask import MASK_CLASS_MAPPINGS, MASK_NAME_MAPPINGS +from .sampling import SAMPLING_CLASS_MAPPINGS, SAMPLING_NAME_MAPPINGS +from .segmentation import SEG_CLASS_MAPPINGS, SEG_NAME_MAPPINGS +from .misc import MISC_CLASS_MAPPINGS, MISC_NAME_MAPPINGS +from .conditioning import COND_CLASS_MAPPINGS, COND_NAME_MAPPINGS +from .text import TEXT_CLASS_MAPPINGS, TEXT_NAME_MAPPINGS + +WEB_DIRECTORY = "./js" + +NODE_CLASS_MAPPINGS = {} +NODE_DISPLAY_NAME_MAPPINGS = {} + +NODE_CLASS_MAPPINGS.update(COND_CLASS_MAPPINGS) +NODE_DISPLAY_NAME_MAPPINGS.update(COND_NAME_MAPPINGS) + +NODE_CLASS_MAPPINGS.update(IMAGE_CLASS_MAPPINGS) +NODE_DISPLAY_NAME_MAPPINGS.update(IMAGE_NAME_MAPPINGS) + +NODE_CLASS_MAPPINGS.update(MASK_CLASS_MAPPINGS) +NODE_DISPLAY_NAME_MAPPINGS.update(MASK_NAME_MAPPINGS) + +NODE_CLASS_MAPPINGS.update(SAMPLING_CLASS_MAPPINGS) +NODE_DISPLAY_NAME_MAPPINGS.update(SAMPLING_NAME_MAPPINGS) + +NODE_CLASS_MAPPINGS.update(SEG_CLASS_MAPPINGS) +NODE_DISPLAY_NAME_MAPPINGS.update(SEG_NAME_MAPPINGS) + +NODE_CLASS_MAPPINGS.update(TEXT_CLASS_MAPPINGS) +NODE_DISPLAY_NAME_MAPPINGS.update(TEXT_NAME_MAPPINGS) + +NODE_CLASS_MAPPINGS.update(MISC_CLASS_MAPPINGS) +NODE_DISPLAY_NAME_MAPPINGS.update(MISC_NAME_MAPPINGS) + +__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS', "WEB_DIRECTORY"] diff --git a/custom_nodes/ComfyUI-essentials-main/carve.py b/custom_nodes/ComfyUI-essentials-main/carve.py new file mode 100644 index 0000000000000000000000000000000000000000..017e804d84324fde3d4004a3ed832676b8237985 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/carve.py @@ -0,0 +1,454 @@ +# MIT licensed code from https://github.com/li-plus/seam-carving/ + +from enum import Enum +from typing import Optional, Tuple + +import numba as nb +import numpy as np +from scipy.ndimage import sobel + +DROP_MASK_ENERGY = 1e5 +KEEP_MASK_ENERGY = 1e3 + + +class OrderMode(str, Enum): + WIDTH_FIRST = "width-first" + HEIGHT_FIRST = "height-first" + + +class EnergyMode(str, Enum): + FORWARD = "forward" + BACKWARD = "backward" + + +def _list_enum(enum_class) -> Tuple: + return tuple(x.value for x in enum_class) + + +def _rgb2gray(rgb: np.ndarray) -> np.ndarray: + """Convert an RGB image to a grayscale image""" + coeffs = np.array([0.2125, 0.7154, 0.0721], dtype=np.float32) + return (rgb @ coeffs).astype(rgb.dtype) + + +def _get_seam_mask(src: np.ndarray, seam: np.ndarray) -> np.ndarray: + """Convert a list of seam column indices to a mask""" + return np.eye(src.shape[1], dtype=bool)[seam] + + +def _remove_seam_mask(src: np.ndarray, seam_mask: np.ndarray) -> np.ndarray: + """Remove a seam from the source image according to the given seam_mask""" + if src.ndim == 3: + h, w, c = src.shape + seam_mask = np.broadcast_to(seam_mask[:, :, None], src.shape) + dst = src[~seam_mask].reshape((h, w - 1, c)) + else: + h, 
w = src.shape + dst = src[~seam_mask].reshape((h, w - 1)) + return dst + + +def _get_energy(gray: np.ndarray) -> np.ndarray: + """Get backward energy map from the source image""" + assert gray.ndim == 2 + + gray = gray.astype(np.float32) + grad_x = sobel(gray, axis=1) + grad_y = sobel(gray, axis=0) + energy = np.abs(grad_x) + np.abs(grad_y) + return energy + + +@nb.njit(nb.int32[:](nb.float32[:, :]), cache=True) +def _get_backward_seam(energy: np.ndarray) -> np.ndarray: + """Compute the minimum vertical seam from the backward energy map""" + h, w = energy.shape + inf = np.array([np.inf], dtype=np.float32) + cost = np.concatenate((inf, energy[0], inf)) + parent = np.empty((h, w), dtype=np.int32) + base_idx = np.arange(-1, w - 1, dtype=np.int32) + + for r in range(1, h): + choices = np.vstack((cost[:-2], cost[1:-1], cost[2:])) + min_idx = np.argmin(choices, axis=0) + base_idx + parent[r] = min_idx + cost[1:-1] = cost[1:-1][min_idx] + energy[r] + + c = np.argmin(cost[1:-1]) + seam = np.empty(h, dtype=np.int32) + for r in range(h - 1, -1, -1): + seam[r] = c + c = parent[r, c] + + return seam + + +def _get_backward_seams( + gray: np.ndarray, num_seams: int, aux_energy: Optional[np.ndarray] +) -> np.ndarray: + """Compute the minimum N vertical seams using backward energy""" + h, w = gray.shape + seams = np.zeros((h, w), dtype=bool) + rows = np.arange(h, dtype=np.int32) + idx_map = np.broadcast_to(np.arange(w, dtype=np.int32), (h, w)) + energy = _get_energy(gray) + if aux_energy is not None: + energy += aux_energy + for _ in range(num_seams): + seam = _get_backward_seam(energy) + seams[rows, idx_map[rows, seam]] = True + + seam_mask = _get_seam_mask(gray, seam) + gray = _remove_seam_mask(gray, seam_mask) + idx_map = _remove_seam_mask(idx_map, seam_mask) + if aux_energy is not None: + aux_energy = _remove_seam_mask(aux_energy, seam_mask) + + # Only need to re-compute the energy in the bounding box of the seam + _, cur_w = energy.shape + lo = max(0, np.min(seam) - 1) + hi = min(cur_w, np.max(seam) + 1) + pad_lo = 1 if lo > 0 else 0 + pad_hi = 1 if hi < cur_w - 1 else 0 + mid_block = gray[:, lo - pad_lo : hi + pad_hi] + _, mid_w = mid_block.shape + mid_energy = _get_energy(mid_block)[:, pad_lo : mid_w - pad_hi] + if aux_energy is not None: + mid_energy += aux_energy[:, lo:hi] + energy = np.hstack((energy[:, :lo], mid_energy, energy[:, hi + 1 :])) + + return seams + + +@nb.njit( + [ + nb.int32[:](nb.float32[:, :], nb.none), + nb.int32[:](nb.float32[:, :], nb.float32[:, :]), + ], + cache=True, +) +def _get_forward_seam(gray: np.ndarray, aux_energy: Optional[np.ndarray]) -> np.ndarray: + """Compute the minimum vertical seam using forward energy""" + h, w = gray.shape + + gray = np.hstack((gray[:, :1], gray, gray[:, -1:])) + + inf = np.array([np.inf], dtype=np.float32) + dp = np.concatenate((inf, np.abs(gray[0, 2:] - gray[0, :-2]), inf)) + + parent = np.empty((h, w), dtype=np.int32) + base_idx = np.arange(-1, w - 1, dtype=np.int32) + + inf = np.array([np.inf], dtype=np.float32) + for r in range(1, h): + curr_shl = gray[r, 2:] + curr_shr = gray[r, :-2] + cost_mid = np.abs(curr_shl - curr_shr) + if aux_energy is not None: + cost_mid += aux_energy[r] + + prev_mid = gray[r - 1, 1:-1] + cost_left = cost_mid + np.abs(prev_mid - curr_shr) + cost_right = cost_mid + np.abs(prev_mid - curr_shl) + + dp_mid = dp[1:-1] + dp_left = dp[:-2] + dp_right = dp[2:] + + choices = np.vstack( + (cost_left + dp_left, cost_mid + dp_mid, cost_right + dp_right) + ) + min_idx = np.argmin(choices, axis=0) + parent[r] = min_idx + 
base_idx + # numba does not support specifying axis in np.min, below loop is equivalent to: + # `dp_mid[:] = np.min(choices, axis=0)` or `dp_mid[:] = choices[min_idx, np.arange(w)]` + for j, i in enumerate(min_idx): + dp_mid[j] = choices[i, j] + + c = np.argmin(dp[1:-1]) + seam = np.empty(h, dtype=np.int32) + for r in range(h - 1, -1, -1): + seam[r] = c + c = parent[r, c] + + return seam + + +def _get_forward_seams( + gray: np.ndarray, num_seams: int, aux_energy: Optional[np.ndarray] +) -> np.ndarray: + """Compute minimum N vertical seams using forward energy""" + h, w = gray.shape + seams = np.zeros((h, w), dtype=bool) + rows = np.arange(h, dtype=np.int32) + idx_map = np.broadcast_to(np.arange(w, dtype=np.int32), (h, w)) + for _ in range(num_seams): + seam = _get_forward_seam(gray, aux_energy) + seams[rows, idx_map[rows, seam]] = True + seam_mask = _get_seam_mask(gray, seam) + gray = _remove_seam_mask(gray, seam_mask) + idx_map = _remove_seam_mask(idx_map, seam_mask) + if aux_energy is not None: + aux_energy = _remove_seam_mask(aux_energy, seam_mask) + + return seams + + +def _get_seams( + gray: np.ndarray, num_seams: int, energy_mode: str, aux_energy: Optional[np.ndarray] +) -> np.ndarray: + """Get the minimum N seams from the grayscale image""" + gray = np.asarray(gray, dtype=np.float32) + if energy_mode == EnergyMode.BACKWARD: + return _get_backward_seams(gray, num_seams, aux_energy) + elif energy_mode == EnergyMode.FORWARD: + return _get_forward_seams(gray, num_seams, aux_energy) + else: + raise ValueError( + f"expect energy_mode to be one of {_list_enum(EnergyMode)}, got {energy_mode}" + ) + + +def _reduce_width( + src: np.ndarray, + delta_width: int, + energy_mode: str, + aux_energy: Optional[np.ndarray], +) -> Tuple[np.ndarray, Optional[np.ndarray]]: + """Reduce the width of image by delta_width pixels""" + assert src.ndim in (2, 3) and delta_width >= 0 + if src.ndim == 2: + gray = src + src_h, src_w = src.shape + dst_shape: Tuple[int, ...] 
= (src_h, src_w - delta_width) + else: + gray = _rgb2gray(src) + src_h, src_w, src_c = src.shape + dst_shape = (src_h, src_w - delta_width, src_c) + + to_keep = ~_get_seams(gray, delta_width, energy_mode, aux_energy) + dst = src[to_keep].reshape(dst_shape) + if aux_energy is not None: + aux_energy = aux_energy[to_keep].reshape(dst_shape[:2]) + return dst, aux_energy + + +@nb.njit( + nb.float32[:, :, :](nb.float32[:, :, :], nb.boolean[:, :], nb.int32), cache=True +) +def _insert_seams_kernel( + src: np.ndarray, seams: np.ndarray, delta_width: int +) -> np.ndarray: + """The numba kernel for inserting seams""" + src_h, src_w, src_c = src.shape + dst = np.empty((src_h, src_w + delta_width, src_c), dtype=src.dtype) + for row in range(src_h): + dst_col = 0 + for src_col in range(src_w): + if seams[row, src_col]: + left = src[row, max(src_col - 1, 0)] + right = src[row, src_col] + dst[row, dst_col] = (left + right) / 2 + dst_col += 1 + dst[row, dst_col] = src[row, src_col] + dst_col += 1 + return dst + + +def _insert_seams(src: np.ndarray, seams: np.ndarray, delta_width: int) -> np.ndarray: + """Insert multiple seams into the source image""" + dst = src.astype(np.float32) + if dst.ndim == 2: + dst = dst[:, :, None] + dst = _insert_seams_kernel(dst, seams, delta_width).astype(src.dtype) + if src.ndim == 2: + dst = dst.squeeze(-1) + return dst + + +def _expand_width( + src: np.ndarray, + delta_width: int, + energy_mode: str, + aux_energy: Optional[np.ndarray], + step_ratio: float, +) -> Tuple[np.ndarray, Optional[np.ndarray]]: + """Expand the width of image by delta_width pixels""" + assert src.ndim in (2, 3) and delta_width >= 0 + if not 0 < step_ratio <= 1: + raise ValueError(f"expect `step_ratio` to be between (0,1], got {step_ratio}") + + dst = src + while delta_width > 0: + max_step_size = max(1, round(step_ratio * dst.shape[1])) + step_size = min(max_step_size, delta_width) + gray = dst if dst.ndim == 2 else _rgb2gray(dst) + seams = _get_seams(gray, step_size, energy_mode, aux_energy) + dst = _insert_seams(dst, seams, step_size) + if aux_energy is not None: + aux_energy = _insert_seams(aux_energy, seams, step_size) + delta_width -= step_size + + return dst, aux_energy + + +def _resize_width( + src: np.ndarray, + width: int, + energy_mode: str, + aux_energy: Optional[np.ndarray], + step_ratio: float, +) -> Tuple[np.ndarray, Optional[np.ndarray]]: + """Resize the width of image by removing vertical seams""" + assert src.size > 0 and src.ndim in (2, 3) + assert width > 0 + + src_w = src.shape[1] + if src_w < width: + dst, aux_energy = _expand_width( + src, width - src_w, energy_mode, aux_energy, step_ratio + ) + else: + dst, aux_energy = _reduce_width(src, src_w - width, energy_mode, aux_energy) + return dst, aux_energy + + +def _transpose_image(src: np.ndarray) -> np.ndarray: + """Transpose a source image in rgb or grayscale format""" + if src.ndim == 3: + dst = src.transpose((1, 0, 2)) + else: + dst = src.T + return dst + + +def _resize_height( + src: np.ndarray, + height: int, + energy_mode: str, + aux_energy: Optional[np.ndarray], + step_ratio: float, +) -> Tuple[np.ndarray, Optional[np.ndarray]]: + """Resize the height of image by removing horizontal seams""" + assert src.ndim in (2, 3) and height > 0 + if aux_energy is not None: + aux_energy = aux_energy.T + src = _transpose_image(src) + src, aux_energy = _resize_width(src, height, energy_mode, aux_energy, step_ratio) + src = _transpose_image(src) + if aux_energy is not None: + aux_energy = aux_energy.T + return src, aux_energy + + +def 
_check_mask(mask: np.ndarray, shape: Tuple[int, ...]) -> np.ndarray: + """Ensure the mask to be a 2D grayscale map of specific shape""" + mask = np.asarray(mask, dtype=bool) + if mask.ndim != 2: + raise ValueError(f"expect mask to be a 2d binary map, got shape {mask.shape}") + if mask.shape != shape: + raise ValueError( + f"expect the shape of mask to match the image, got {mask.shape} vs {shape}" + ) + return mask + + +def _check_src(src: np.ndarray) -> np.ndarray: + """Ensure the source to be RGB or grayscale""" + src = np.asarray(src) + if src.size == 0 or src.ndim not in (2, 3): + raise ValueError( + f"expect a 3d rgb image or a 2d grayscale image, got image in shape {src.shape}" + ) + return src + + +def seam_carving( + src: np.ndarray, + size: Optional[Tuple[int, int]] = None, + energy_mode: str = "backward", + order: str = "width-first", + keep_mask: Optional[np.ndarray] = None, + drop_mask: Optional[np.ndarray] = None, + step_ratio: float = 0.5, +) -> np.ndarray: + """Resize the image using the content-aware seam-carving algorithm. + + :param src: A source image in RGB or grayscale format. + :param size: The target size in pixels, as a 2-tuple (width, height). + :param energy_mode: Policy to compute energy for the source image. Could be + one of ``backward`` or ``forward``. If ``backward``, compute the energy + as the gradient at each pixel. If ``forward``, compute the energy as the + distances between adjacent pixels after each pixel is removed. + :param order: The order to remove horizontal and vertical seams. Could be + one of ``width-first`` or ``height-first``. In ``width-first`` mode, we + remove or insert all vertical seams first, then the horizontal ones, + while ``height-first`` is the opposite. + :param keep_mask: An optional mask where the foreground is protected from + seam removal. If not specified, no area will be protected. + :param drop_mask: An optional binary object mask to remove. If given, the + object will be removed before resizing the image to the target size. + :param step_ratio: The maximum size expansion ratio in one seam carving step. + The image will be expanded in multiple steps if target size is too large. + :return: A resized copy of the source image. 
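    Illustrative example (a sketch only; assumes ``src`` is an RGB ``numpy``
    array and that ``numba``/``scipy`` are available):

        >>> import numpy as np
        >>> img = np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8)
        >>> out = seam_carving(img, size=(320, 300), energy_mode="backward")
        >>> out.shape
        (300, 320, 3)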
+ """ + src = _check_src(src) + + if order not in _list_enum(OrderMode): + raise ValueError( + f"expect order to be one of {_list_enum(OrderMode)}, got {order}" + ) + + aux_energy = None + + if keep_mask is not None: + keep_mask = _check_mask(keep_mask, src.shape[:2]) + + aux_energy = np.zeros(src.shape[:2], dtype=np.float32) + aux_energy[keep_mask] += KEEP_MASK_ENERGY + + # remove object if `drop_mask` is given + if drop_mask is not None: + drop_mask = _check_mask(drop_mask, src.shape[:2]) + + if aux_energy is None: + aux_energy = np.zeros(src.shape[:2], dtype=np.float32) + aux_energy[drop_mask] -= DROP_MASK_ENERGY + + if order == OrderMode.HEIGHT_FIRST: + src = _transpose_image(src) + aux_energy = aux_energy.T + + num_seams = (aux_energy < 0).sum(1).max() + while num_seams > 0: + src, aux_energy = _reduce_width(src, num_seams, energy_mode, aux_energy) + num_seams = (aux_energy < 0).sum(1).max() + + if order == OrderMode.HEIGHT_FIRST: + src = _transpose_image(src) + aux_energy = aux_energy.T + + # resize image if `size` is given + if size is not None: + width, height = size + width = round(width) + height = round(height) + if width <= 0 or height <= 0: + raise ValueError(f"expect target size to be positive, got {size}") + + if order == OrderMode.WIDTH_FIRST: + src, aux_energy = _resize_width( + src, width, energy_mode, aux_energy, step_ratio + ) + src, aux_energy = _resize_height( + src, height, energy_mode, aux_energy, step_ratio + ) + else: + src, aux_energy = _resize_height( + src, height, energy_mode, aux_energy, step_ratio + ) + src, aux_energy = _resize_width( + src, width, energy_mode, aux_energy, step_ratio + ) + + return src diff --git a/custom_nodes/ComfyUI-essentials-main/conditioning.py b/custom_nodes/ComfyUI-essentials-main/conditioning.py new file mode 100644 index 0000000000000000000000000000000000000000..dad5a419cad57239a4f01de20918f0ed1d6174c0 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/conditioning.py @@ -0,0 +1,280 @@ +from nodes import MAX_RESOLUTION, ConditioningZeroOut, ConditioningSetTimestepRange, ConditioningCombine +import re + +class CLIPTextEncodeSDXLSimplified: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), + "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}), + "size_cond_factor": ("INT", {"default": 4, "min": 1, "max": 16 }), + "text": ("STRING", {"multiline": True, "dynamicPrompts": True, "default": ""}), + "clip": ("CLIP", ), + }} + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "execute" + CATEGORY = "essentials/conditioning" + + def execute(self, clip, width, height, size_cond_factor, text): + crop_w = 0 + crop_h = 0 + width = width*size_cond_factor + height = height*size_cond_factor + target_width = width + target_height = height + text_g = text_l = text + + tokens = clip.tokenize(text_g) + tokens["l"] = clip.tokenize(text_l)["l"] + if len(tokens["l"]) != len(tokens["g"]): + empty = clip.tokenize("") + while len(tokens["l"]) < len(tokens["g"]): + tokens["l"] += empty["l"] + while len(tokens["l"]) > len(tokens["g"]): + tokens["g"] += empty["g"] + cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True) + return ([[cond, {"pooled_output": pooled, "width": width, "height": height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}]], ) + +class ConditioningCombineMultiple: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "conditioning_1": ("CONDITIONING",), + 
"conditioning_2": ("CONDITIONING",), + }, "optional": { + "conditioning_3": ("CONDITIONING",), + "conditioning_4": ("CONDITIONING",), + "conditioning_5": ("CONDITIONING",), + }, + } + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "execute" + CATEGORY = "essentials/conditioning" + + def execute(self, conditioning_1, conditioning_2, conditioning_3=None, conditioning_4=None, conditioning_5=None): + c = conditioning_1 + conditioning_2 + + if conditioning_3 is not None: + c += conditioning_3 + if conditioning_4 is not None: + c += conditioning_4 + if conditioning_5 is not None: + c += conditioning_5 + + return (c,) + +class SD3NegativeConditioning: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "conditioning": ("CONDITIONING",), + "end": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.001 }), + }} + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "execute" + CATEGORY = "essentials/conditioning" + + def execute(self, conditioning, end): + zero_c = ConditioningZeroOut().zero_out(conditioning)[0] + + if end == 0: + return (zero_c, ) + + c = ConditioningSetTimestepRange().set_range(conditioning, 0, end)[0] + zero_c = ConditioningSetTimestepRange().set_range(zero_c, end, 1.0)[0] + c = ConditioningCombine().combine(zero_c, c)[0] + + return (c, ) + +class FluxAttentionSeeker: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "clip": ("CLIP",), + "apply_to_query": ("BOOLEAN", { "default": True }), + "apply_to_key": ("BOOLEAN", { "default": True }), + "apply_to_value": ("BOOLEAN", { "default": True }), + "apply_to_out": ("BOOLEAN", { "default": True }), + **{f"clip_l_{s}": ("FLOAT", { "display": "slider", "default": 1.0, "min": 0, "max": 5, "step": 0.05 }) for s in range(12)}, + **{f"t5xxl_{s}": ("FLOAT", { "display": "slider", "default": 1.0, "min": 0, "max": 5, "step": 0.05 }) for s in range(24)}, + }} + + RETURN_TYPES = ("CLIP",) + FUNCTION = "execute" + + CATEGORY = "essentials/conditioning" + + def execute(self, clip, apply_to_query, apply_to_key, apply_to_value, apply_to_out, **values): + if not apply_to_key and not apply_to_query and not apply_to_value and not apply_to_out: + return (clip, ) + + m = clip.clone() + sd = m.patcher.model_state_dict() + + for k in sd: + if "self_attn" in k: + layer = re.search(r"\.layers\.(\d+)\.", k) + layer = int(layer.group(1)) if layer else None + + if layer is not None and values[f"clip_l_{layer}"] != 1.0: + if (apply_to_query and "q_proj" in k) or (apply_to_key and "k_proj" in k) or (apply_to_value and "v_proj" in k) or (apply_to_out and "out_proj" in k): + m.add_patches({k: (None,)}, 0.0, values[f"clip_l_{layer}"]) + elif "SelfAttention" in k: + block = re.search(r"\.block\.(\d+)\.", k) + block = int(block.group(1)) if block else None + + if block is not None and values[f"t5xxl_{block}"] != 1.0: + if (apply_to_query and ".q." in k) or (apply_to_key and ".k." in k) or (apply_to_value and ".v." in k) or (apply_to_out and ".o." 
in k): + m.add_patches({k: (None,)}, 0.0, values[f"t5xxl_{block}"]) + + return (m, ) + +class SD3AttentionSeekerLG: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "clip": ("CLIP",), + "apply_to_query": ("BOOLEAN", { "default": True }), + "apply_to_key": ("BOOLEAN", { "default": True }), + "apply_to_value": ("BOOLEAN", { "default": True }), + "apply_to_out": ("BOOLEAN", { "default": True }), + **{f"clip_l_{s}": ("FLOAT", { "display": "slider", "default": 1.0, "min": 0, "max": 5, "step": 0.05 }) for s in range(12)}, + **{f"clip_g_{s}": ("FLOAT", { "display": "slider", "default": 1.0, "min": 0, "max": 5, "step": 0.05 }) for s in range(32)}, + }} + + RETURN_TYPES = ("CLIP",) + FUNCTION = "execute" + + CATEGORY = "essentials/conditioning" + + def execute(self, clip, apply_to_query, apply_to_key, apply_to_value, apply_to_out, **values): + if not apply_to_key and not apply_to_query and not apply_to_value and not apply_to_out: + return (clip, ) + + m = clip.clone() + sd = m.patcher.model_state_dict() + + for k in sd: + if "self_attn" in k: + layer = re.search(r"\.layers\.(\d+)\.", k) + layer = int(layer.group(1)) if layer else None + + if layer is not None: + if "clip_l" in k and values[f"clip_l_{layer}"] != 1.0: + if (apply_to_query and "q_proj" in k) or (apply_to_key and "k_proj" in k) or (apply_to_value and "v_proj" in k) or (apply_to_out and "out_proj" in k): + m.add_patches({k: (None,)}, 0.0, values[f"clip_l_{layer}"]) + elif "clip_g" in k and values[f"clip_g_{layer}"] != 1.0: + if (apply_to_query and "q_proj" in k) or (apply_to_key and "k_proj" in k) or (apply_to_value and "v_proj" in k) or (apply_to_out and "out_proj" in k): + m.add_patches({k: (None,)}, 0.0, values[f"clip_g_{layer}"]) + + return (m, ) + +class SD3AttentionSeekerT5: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "clip": ("CLIP",), + "apply_to_query": ("BOOLEAN", { "default": True }), + "apply_to_key": ("BOOLEAN", { "default": True }), + "apply_to_value": ("BOOLEAN", { "default": True }), + "apply_to_out": ("BOOLEAN", { "default": True }), + **{f"t5xxl_{s}": ("FLOAT", { "display": "slider", "default": 1.0, "min": 0, "max": 5, "step": 0.05 }) for s in range(24)}, + }} + + RETURN_TYPES = ("CLIP",) + FUNCTION = "execute" + + CATEGORY = "essentials/conditioning" + + def execute(self, clip, apply_to_query, apply_to_key, apply_to_value, apply_to_out, **values): + if not apply_to_key and not apply_to_query and not apply_to_value and not apply_to_out: + return (clip, ) + + m = clip.clone() + sd = m.patcher.model_state_dict() + + for k in sd: + if "SelfAttention" in k: + block = re.search(r"\.block\.(\d+)\.", k) + block = int(block.group(1)) if block else None + + if block is not None and values[f"t5xxl_{block}"] != 1.0: + if (apply_to_query and ".q." in k) or (apply_to_key and ".k." in k) or (apply_to_value and ".v." in k) or (apply_to_out and ".o." 
in k): + m.add_patches({k: (None,)}, 0.0, values[f"t5xxl_{block}"]) + + return (m, ) + +class FluxBlocksBuster: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL",), + "blocks": ("STRING", {"default": "## 0 = 1.0\n## 1 = 1.0\n## 2 = 1.0\n## 3 = 1.0\n## 4 = 1.0\n## 5 = 1.0\n## 6 = 1.0\n## 7 = 1.0\n## 8 = 1.0\n## 9 = 1.0\n## 10 = 1.0\n## 11 = 1.0\n## 12 = 1.0\n## 13 = 1.0\n## 14 = 1.0\n## 15 = 1.0\n## 16 = 1.0\n## 17 = 1.0\n## 18 = 1.0\n# 0 = 1.0\n# 1 = 1.0\n# 2 = 1.0\n# 3 = 1.0\n# 4 = 1.0\n# 5 = 1.0\n# 6 = 1.0\n# 7 = 1.0\n# 8 = 1.0\n# 9 = 1.0\n# 10 = 1.0\n# 11 = 1.0\n# 12 = 1.0\n# 13 = 1.0\n# 14 = 1.0\n# 15 = 1.0\n# 16 = 1.0\n# 17 = 1.0\n# 18 = 1.0\n# 19 = 1.0\n# 20 = 1.0\n# 21 = 1.0\n# 22 = 1.0\n# 23 = 1.0\n# 24 = 1.0\n# 25 = 1.0\n# 26 = 1.0\n# 27 = 1.0\n# 28 = 1.0\n# 29 = 1.0\n# 30 = 1.0\n# 31 = 1.0\n# 32 = 1.0\n# 33 = 1.0\n# 34 = 1.0\n# 35 = 1.0\n# 36 = 1.0\n# 37 = 1.0", "multiline": True, "dynamicPrompts": True}), + #**{f"double_block_{s}": ("FLOAT", { "display": "slider", "default": 1.0, "min": 0, "max": 5, "step": 0.05 }) for s in range(19)}, + #**{f"single_block_{s}": ("FLOAT", { "display": "slider", "default": 1.0, "min": 0, "max": 5, "step": 0.05 }) for s in range(38)}, + }} + RETURN_TYPES = ("MODEL", "STRING") + RETURN_NAMES = ("MODEL", "patched_blocks") + FUNCTION = "patch" + + CATEGORY = "essentials/conditioning" + + def patch(self, model, blocks): + if blocks == "": + return (model, ) + + m = model.clone() + sd = model.model_state_dict() + patched_blocks = [] + + """ + Also compatible with the following format: + + double_blocks\.0\.(img|txt)_(mod|attn|mlp)\.(lin|qkv|proj|0|2)\.(weight|bias)=1.1 + single_blocks\.0\.(linear[12]|modulation\.lin)\.(weight|bias)=1.1 + + The regex is used to match the block names + """ + + blocks = blocks.split("\n") + blocks = [b.strip() for b in blocks if b.strip()] + + for k in sd: + for block in blocks: + block = block.split("=") + value = float(block[1].strip()) if len(block) > 1 else 1.0 + block = block[0].strip() + if block.startswith("##"): + block = r"double_blocks\." + block[2:].strip() + r"\.(img|txt)_(mod|attn|mlp)\.(lin|qkv|proj|0|2)\.(weight|bias)" + elif block.startswith("#"): + block = r"single_blocks\." 
+ block[1:].strip() + r"\.(linear[12]|modulation\.lin)\.(weight|bias)" + + if value != 1.0 and re.search(block, k): + m.add_patches({k: (None,)}, 0.0, value) + patched_blocks.append(f"{k}: {value}") + + patched_blocks = "\n".join(patched_blocks) + + return (m, patched_blocks,) + + +COND_CLASS_MAPPINGS = { + "CLIPTextEncodeSDXL+": CLIPTextEncodeSDXLSimplified, + "ConditioningCombineMultiple+": ConditioningCombineMultiple, + "SD3NegativeConditioning+": SD3NegativeConditioning, + "FluxAttentionSeeker+": FluxAttentionSeeker, + "SD3AttentionSeekerLG+": SD3AttentionSeekerLG, + "SD3AttentionSeekerT5+": SD3AttentionSeekerT5, + "FluxBlocksBuster+": FluxBlocksBuster, +} + +COND_NAME_MAPPINGS = { + "CLIPTextEncodeSDXL+": "πŸ”§ SDXL CLIPTextEncode", + "ConditioningCombineMultiple+": "πŸ”§ Cond Combine Multiple", + "SD3NegativeConditioning+": "πŸ”§ SD3 Negative Conditioning", + "FluxAttentionSeeker+": "πŸ”§ Flux Attention Seeker", + "SD3AttentionSeekerLG+": "πŸ”§ SD3 Attention Seeker L/G", + "SD3AttentionSeekerT5+": "πŸ”§ SD3 Attention Seeker T5", + "FluxBlocksBuster+": "πŸ”§ Flux Model Blocks Buster", +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-essentials-main/fonts/ShareTechMono-Regular.ttf b/custom_nodes/ComfyUI-essentials-main/fonts/ShareTechMono-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..0ae0b19750c51a751bc45f54622443d55d643999 Binary files /dev/null and b/custom_nodes/ComfyUI-essentials-main/fonts/ShareTechMono-Regular.ttf differ diff --git a/custom_nodes/ComfyUI-essentials-main/fonts/put_font_files_here.txt b/custom_nodes/ComfyUI-essentials-main/fonts/put_font_files_here.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/ComfyUI-essentials-main/histogram_matching.py b/custom_nodes/ComfyUI-essentials-main/histogram_matching.py new file mode 100644 index 0000000000000000000000000000000000000000..f7b469446ad98c6e310453981c4e439431e98049 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/histogram_matching.py @@ -0,0 +1,87 @@ +# from MIT licensed https://github.com/nemodleo/pytorch-histogram-matching +import torch +import torch.nn as nn +import torch.nn.functional as F + +class Histogram_Matching(nn.Module): + def __init__(self, differentiable=False): + super(Histogram_Matching, self).__init__() + self.differentiable = differentiable + + def forward(self, dst, ref): + # B C + B, C, H, W = dst.size() + # assertion + assert dst.device == ref.device + # [B*C 256] + hist_dst = self.cal_hist(dst) + hist_ref = self.cal_hist(ref) + # [B*C 256] + tables = self.cal_trans_batch(hist_dst, hist_ref) + # [B C H W] + rst = dst.clone() + for b in range(B): + for c in range(C): + rst[b,c] = tables[b*c, (dst[b,c] * 255).long()] + # [B C H W] + rst /= 255. 
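        # Usage sketch (illustrative only): the module expects float images shaped
        # [B, C, H, W] with values in [0, 1] and returns `dst` remapped so that each
        # channel's histogram matches the corresponding channel of `ref`, e.g.:
        #
        #   hm = Histogram_Matching(differentiable=False)
        #   dst = torch.rand(1, 3, 64, 64)   # image whose colours will be adjusted
        #   ref = torch.rand(1, 3, 64, 64)   # image providing the target histogram
        #   out = hm(dst, ref)               # same shape and value range as dst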
+ return rst + + def cal_hist(self, img): + B, C, H, W = img.size() + # [B*C 256] + if self.differentiable: + hists = self.soft_histc_batch(img * 255, bins=256, min=0, max=256, sigma=3*25) + else: + hists = torch.stack([torch.histc(img[b,c] * 255, bins=256, min=0, max=255) for b in range(B) for c in range(C)]) + hists = hists.float() + hists = F.normalize(hists, p=1) + # BC 256 + bc, n = hists.size() + # [B*C 256 256] + triu = torch.ones(bc, n, n, device=hists.device).triu() + # [B*C 256] + hists = torch.bmm(hists[:,None,:], triu)[:,0,:] + return hists + + def soft_histc_batch(self, x, bins=256, min=0, max=256, sigma=3*25): + # B C H W + B, C, H, W = x.size() + # [B*C H*W] + x = x.view(B*C, -1) + # 1 + delta = float(max - min) / float(bins) + # [256] + centers = float(min) + delta * (torch.arange(bins, device=x.device, dtype=torch.bfloat16) + 0.5) + # [B*C 1 H*W] + x = torch.unsqueeze(x, 1) + # [1 256 1] + centers = centers[None,:,None] + # [B*C 256 H*W] + x = x - centers + # [B*C 256 H*W] + x = x.type(torch.bfloat16) + # [B*C 256 H*W] + x = torch.sigmoid(sigma * (x + delta/2)) - torch.sigmoid(sigma * (x - delta/2)) + # [B*C 256] + x = x.sum(dim=2) + # [B*C 256] + x = x.type(torch.float32) + # prevent oom + # torch.cuda.empty_cache() + return x + + def cal_trans_batch(self, hist_dst, hist_ref): + # [B*C 256 256] + hist_dst = hist_dst[:,None,:].repeat(1,256,1) + # [B*C 256 256] + hist_ref = hist_ref[:,:,None].repeat(1,1,256) + # [B*C 256 256] + table = hist_dst - hist_ref + # [B*C 256 256] + table = torch.where(table>=0, 1., 0.) + # [B*C 256] + table = torch.sum(table, dim=1) - 1 + # [B*C 256] + table = torch.clamp(table, min=0, max=255) + return table diff --git a/custom_nodes/ComfyUI-essentials-main/image.py b/custom_nodes/ComfyUI-essentials-main/image.py new file mode 100644 index 0000000000000000000000000000000000000000..d2ee3e3ae350befd7a2f299f44bf8ed3ab26ad92 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/image.py @@ -0,0 +1,1770 @@ +from .utils import max_, min_ +from nodes import MAX_RESOLUTION +import comfy.utils +from nodes import SaveImage +from node_helpers import pillow +from PIL import Image, ImageOps + +import kornia +import torch +import torch.nn.functional as F +import torchvision.transforms.v2 as T + +#import warnings +#warnings.filterwarnings('ignore', module="torchvision") +import math +import os +import numpy as np +import folder_paths +from pathlib import Path +import random + +""" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Image analysis +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +""" + +class ImageEnhanceDifference: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "exponent": ("FLOAT", { "default": 0.75, "min": 0.00, "max": 1.00, "step": 0.05, }), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image analysis" + + def execute(self, image1, image2, exponent): + if image1.shape[1:] != image2.shape[1:]: + image2 = comfy.utils.common_upscale(image2.permute([0,3,1,2]), image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center').permute([0,2,3,1]) + + diff_image = image1 - image2 + diff_image = torch.pow(diff_image, exponent) + diff_image = torch.clamp(diff_image, 0, 1) + + return(diff_image,) + +""" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Batch tools 
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +""" + +class ImageBatchMultiple: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image_1": ("IMAGE",), + "method": (["nearest-exact", "bilinear", "area", "bicubic", "lanczos"], { "default": "lanczos" }), + }, "optional": { + "image_2": ("IMAGE",), + "image_3": ("IMAGE",), + "image_4": ("IMAGE",), + "image_5": ("IMAGE",), + }, + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image batch" + + def execute(self, image_1, method, image_2=None, image_3=None, image_4=None, image_5=None): + out = image_1 + + if image_2 is not None: + if image_1.shape[1:] != image_2.shape[1:]: + image_2 = comfy.utils.common_upscale(image_2.movedim(-1,1), image_1.shape[2], image_1.shape[1], method, "center").movedim(1,-1) + out = torch.cat((image_1, image_2), dim=0) + if image_3 is not None: + if image_1.shape[1:] != image_3.shape[1:]: + image_3 = comfy.utils.common_upscale(image_3.movedim(-1,1), image_1.shape[2], image_1.shape[1], method, "center").movedim(1,-1) + out = torch.cat((out, image_3), dim=0) + if image_4 is not None: + if image_1.shape[1:] != image_4.shape[1:]: + image_4 = comfy.utils.common_upscale(image_4.movedim(-1,1), image_1.shape[2], image_1.shape[1], method, "center").movedim(1,-1) + out = torch.cat((out, image_4), dim=0) + if image_5 is not None: + if image_1.shape[1:] != image_5.shape[1:]: + image_5 = comfy.utils.common_upscale(image_5.movedim(-1,1), image_1.shape[2], image_1.shape[1], method, "center").movedim(1,-1) + out = torch.cat((out, image_5), dim=0) + + return (out,) + + +class ImageExpandBatch: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "size": ("INT", { "default": 16, "min": 1, "step": 1, }), + "method": (["expand", "repeat all", "repeat first", "repeat last"],) + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image batch" + + def execute(self, image, size, method): + orig_size = image.shape[0] + + if orig_size == size: + return (image,) + + if size <= 1: + return (image[:size],) + + if 'expand' in method: + out = torch.empty([size] + list(image.shape)[1:], dtype=image.dtype, device=image.device) + if size < orig_size: + scale = (orig_size - 1) / (size - 1) + for i in range(size): + out[i] = image[min(round(i * scale), orig_size - 1)] + else: + scale = orig_size / size + for i in range(size): + out[i] = image[min(math.floor((i + 0.5) * scale), orig_size - 1)] + elif 'all' in method: + out = image.repeat([math.ceil(size / image.shape[0])] + [1] * (len(image.shape) - 1))[:size] + elif 'first' in method: + if size < image.shape[0]: + out = image[:size] + else: + out = torch.cat([image[:1].repeat(size-image.shape[0], 1, 1, 1), image], dim=0) + elif 'last' in method: + if size < image.shape[0]: + out = image[:size] + else: + out = torch.cat((image, image[-1:].repeat((size-image.shape[0], 1, 1, 1))), dim=0) + + return (out,) + +class ImageFromBatch: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE", ), + "start": ("INT", { "default": 0, "min": 0, "step": 1, }), + "length": ("INT", { "default": -1, "min": -1, "step": 1, }), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image batch" + + def execute(self, image, start, length): + if length<0: + length = image.shape[0] + start = min(start, image.shape[0]-1) + length = min(image.shape[0]-start, length) + return (image[start:start + length], ) + + +class 
ImageListToBatch: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + INPUT_IS_LIST = True + CATEGORY = "essentials/image batch" + + def execute(self, image): + shape = image[0].shape[1:3] + out = [] + + for i in range(len(image)): + img = image[i] + if image[i].shape[1:3] != shape: + img = comfy.utils.common_upscale(img.permute([0,3,1,2]), shape[1], shape[0], upscale_method='bicubic', crop='center').permute([0,2,3,1]) + out.append(img) + + out = torch.cat(out, dim=0) + + return (out,) + +class ImageBatchToList: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + } + } + + RETURN_TYPES = ("IMAGE",) + OUTPUT_IS_LIST = (True,) + FUNCTION = "execute" + CATEGORY = "essentials/image batch" + + def execute(self, image): + return ([image[i].unsqueeze(0) for i in range(image.shape[0])], ) + + +""" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Image manipulation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +""" + +class ImageCompositeFromMaskBatch: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image_from": ("IMAGE", ), + "image_to": ("IMAGE", ), + "mask": ("MASK", ) + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image manipulation" + + def execute(self, image_from, image_to, mask): + frames = mask.shape[0] + + if image_from.shape[1] != image_to.shape[1] or image_from.shape[2] != image_to.shape[2]: + image_to = comfy.utils.common_upscale(image_to.permute([0,3,1,2]), image_from.shape[2], image_from.shape[1], upscale_method='bicubic', crop='center').permute([0,2,3,1]) + + if frames < image_from.shape[0]: + image_from = image_from[:frames] + elif frames > image_from.shape[0]: + image_from = torch.cat((image_from, image_from[-1].unsqueeze(0).repeat(frames-image_from.shape[0], 1, 1, 1)), dim=0) + + mask = mask.unsqueeze(3).repeat(1, 1, 1, 3) + + if image_from.shape[1] != mask.shape[1] or image_from.shape[2] != mask.shape[2]: + mask = comfy.utils.common_upscale(mask.permute([0,3,1,2]), image_from.shape[2], image_from.shape[1], upscale_method='bicubic', crop='center').permute([0,2,3,1]) + + out = mask * image_to + (1 - mask) * image_from + + return (out, ) + +class ImageComposite: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "destination": ("IMAGE",), + "source": ("IMAGE",), + "x": ("INT", { "default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1 }), + "y": ("INT", { "default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1 }), + "offset_x": ("INT", { "default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1 }), + "offset_y": ("INT", { "default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1 }), + }, + "optional": { + "mask": ("MASK",), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image manipulation" + + def execute(self, destination, source, x, y, offset_x, offset_y, mask=None): + if mask is None: + mask = torch.ones_like(source)[:,:,:,0] + + mask = mask.unsqueeze(-1).repeat(1, 1, 1, 3) + + if mask.shape[1:3] != source.shape[1:3]: + mask = F.interpolate(mask.permute([0, 3, 1, 2]), size=(source.shape[1], source.shape[2]), mode='bicubic') + mask = mask.permute([0, 2, 3, 1]) + + if mask.shape[0] > source.shape[0]: + mask = mask[:source.shape[0]] + elif mask.shape[0] < source.shape[0]: + mask = torch.cat((mask, 
mask[-1:].repeat((source.shape[0]-mask.shape[0], 1, 1, 1))), dim=0) + + if destination.shape[0] > source.shape[0]: + destination = destination[:source.shape[0]] + elif destination.shape[0] < source.shape[0]: + destination = torch.cat((destination, destination[-1:].repeat((source.shape[0]-destination.shape[0], 1, 1, 1))), dim=0) + + if not isinstance(x, list): + x = [x] + if not isinstance(y, list): + y = [y] + + if len(x) < destination.shape[0]: + x = x + [x[-1]] * (destination.shape[0] - len(x)) + if len(y) < destination.shape[0]: + y = y + [y[-1]] * (destination.shape[0] - len(y)) + + x = [i + offset_x for i in x] + y = [i + offset_y for i in y] + + output = [] + for i in range(destination.shape[0]): + d = destination[i].clone() + s = source[i] + m = mask[i] + + if x[i]+source.shape[2] > destination.shape[2]: + s = s[:, :, :destination.shape[2]-x[i], :] + m = m[:, :, :destination.shape[2]-x[i], :] + if y[i]+source.shape[1] > destination.shape[1]: + s = s[:, :destination.shape[1]-y[i], :, :] + m = m[:destination.shape[1]-y[i], :, :] + + #output.append(s * m + d[y[i]:y[i]+s.shape[0], x[i]:x[i]+s.shape[1], :] * (1 - m)) + d[y[i]:y[i]+s.shape[0], x[i]:x[i]+s.shape[1], :] = s * m + d[y[i]:y[i]+s.shape[0], x[i]:x[i]+s.shape[1], :] * (1 - m) + output.append(d) + + output = torch.stack(output) + + # apply the source to the destination at XY position using the mask + #for i in range(destination.shape[0]): + # output[i, y[i]:y[i]+source.shape[1], x[i]:x[i]+source.shape[2], :] = source * mask + destination[i, y[i]:y[i]+source.shape[1], x[i]:x[i]+source.shape[2], :] * (1 - mask) + + #for x_, y_ in zip(x, y): + # output[:, y_:y_+source.shape[1], x_:x_+source.shape[2], :] = source * mask + destination[:, y_:y_+source.shape[1], x_:x_+source.shape[2], :] * (1 - mask) + + #output[:, y:y+source.shape[1], x:x+source.shape[2], :] = source * mask + destination[:, y:y+source.shape[1], x:x+source.shape[2], :] * (1 - mask) + #output = destination * (1 - mask) + source * mask + + return (output,) + +class ImageResize: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1, }), + "interpolation": (["nearest", "bilinear", "bicubic", "area", "nearest-exact", "lanczos"],), + "method": (["stretch", "keep proportion", "fill / crop", "pad"],), + "condition": (["always", "downscale if bigger", "upscale if smaller", "if bigger area", "if smaller area"],), + "multiple_of": ("INT", { "default": 0, "min": 0, "max": 512, "step": 1, }), + } + } + + RETURN_TYPES = ("IMAGE", "INT", "INT",) + RETURN_NAMES = ("IMAGE", "width", "height",) + FUNCTION = "execute" + CATEGORY = "essentials/image manipulation" + + def execute(self, image, width, height, method="stretch", interpolation="nearest", condition="always", multiple_of=0, keep_proportion=False): + _, oh, ow, _ = image.shape + x = y = x2 = y2 = 0 + pad_left = pad_right = pad_top = pad_bottom = 0 + + if keep_proportion: + method = "keep proportion" + + if multiple_of > 1: + width = width - (width % multiple_of) + height = height - (height % multiple_of) + + if method == 'keep proportion' or method == 'pad': + if width == 0 and oh < height: + width = MAX_RESOLUTION + elif width == 0 and oh >= height: + width = ow + + if height == 0 and ow < width: + height = MAX_RESOLUTION + elif height == 0 and ow >= width: + height = oh + + ratio = min(width / ow, height / oh) + new_width = 
round(ow*ratio) + new_height = round(oh*ratio) + + if method == 'pad': + pad_left = (width - new_width) // 2 + pad_right = width - new_width - pad_left + pad_top = (height - new_height) // 2 + pad_bottom = height - new_height - pad_top + + width = new_width + height = new_height + elif method.startswith('fill'): + width = width if width > 0 else ow + height = height if height > 0 else oh + + ratio = max(width / ow, height / oh) + new_width = round(ow*ratio) + new_height = round(oh*ratio) + x = (new_width - width) // 2 + y = (new_height - height) // 2 + x2 = x + width + y2 = y + height + if x2 > new_width: + x -= (x2 - new_width) + if x < 0: + x = 0 + if y2 > new_height: + y -= (y2 - new_height) + if y < 0: + y = 0 + width = new_width + height = new_height + else: + width = width if width > 0 else ow + height = height if height > 0 else oh + + if "always" in condition \ + or ("downscale if bigger" == condition and (oh > height or ow > width)) or ("upscale if smaller" == condition and (oh < height or ow < width)) \ + or ("bigger area" in condition and (oh * ow > height * width)) or ("smaller area" in condition and (oh * ow < height * width)): + + outputs = image.permute(0,3,1,2) + + if interpolation == "lanczos": + outputs = comfy.utils.lanczos(outputs, width, height) + else: + outputs = F.interpolate(outputs, size=(height, width), mode=interpolation) + + if method == 'pad': + if pad_left > 0 or pad_right > 0 or pad_top > 0 or pad_bottom > 0: + outputs = F.pad(outputs, (pad_left, pad_right, pad_top, pad_bottom), value=0) + + outputs = outputs.permute(0,2,3,1) + + if method.startswith('fill'): + if x > 0 or y > 0 or x2 > 0 or y2 > 0: + outputs = outputs[:, y:y2, x:x2, :] + else: + outputs = image + + if multiple_of > 1 and (outputs.shape[2] % multiple_of != 0 or outputs.shape[1] % multiple_of != 0): + width = outputs.shape[2] + height = outputs.shape[1] + x = (width % multiple_of) // 2 + y = (height % multiple_of) // 2 + x2 = width - ((width % multiple_of) - x) + y2 = height - ((height % multiple_of) - y) + outputs = outputs[:, y:y2, x:x2, :] + + outputs = torch.clamp(outputs, 0, 1) + + return(outputs, outputs.shape[2], outputs.shape[1],) + +class ImageFlip: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "axis": (["x", "y", "xy"],), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image manipulation" + + def execute(self, image, axis): + dim = () + if "y" in axis: + dim += (1,) + if "x" in axis: + dim += (2,) + image = torch.flip(image, dim) + + return(image,) + +class ImageCrop: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "width": ("INT", { "default": 256, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "height": ("INT", { "default": 256, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "position": (["top-left", "top-center", "top-right", "right-center", "bottom-right", "bottom-center", "bottom-left", "left-center", "center"],), + "x_offset": ("INT", { "default": 0, "min": -99999, "step": 1, }), + "y_offset": ("INT", { "default": 0, "min": -99999, "step": 1, }), + } + } + + RETURN_TYPES = ("IMAGE","INT","INT",) + RETURN_NAMES = ("IMAGE","x","y",) + FUNCTION = "execute" + CATEGORY = "essentials/image manipulation" + + def execute(self, image, width, height, position, x_offset, y_offset): + _, oh, ow, _ = image.shape + + width = min(ow, width) + height = min(oh, height) + + if "center" in position: + x = round((ow-width) / 2) + y = round((oh-height) / 2) + if "top" in 
position: + y = 0 + if "bottom" in position: + y = oh-height + if "left" in position: + x = 0 + if "right" in position: + x = ow-width + + x += x_offset + y += y_offset + + x2 = x+width + y2 = y+height + + if x2 > ow: + x2 = ow + if x < 0: + x = 0 + if y2 > oh: + y2 = oh + if y < 0: + y = 0 + + image = image[:, y:y2, x:x2, :] + + return(image, x, y, ) + +class ImageTile: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "rows": ("INT", { "default": 2, "min": 1, "max": 256, "step": 1, }), + "cols": ("INT", { "default": 2, "min": 1, "max": 256, "step": 1, }), + "overlap": ("FLOAT", { "default": 0, "min": 0, "max": 0.5, "step": 0.01, }), + "overlap_x": ("INT", { "default": 0, "min": 0, "max": MAX_RESOLUTION//2, "step": 1, }), + "overlap_y": ("INT", { "default": 0, "min": 0, "max": MAX_RESOLUTION//2, "step": 1, }), + } + } + + RETURN_TYPES = ("IMAGE", "INT", "INT", "INT", "INT") + RETURN_NAMES = ("IMAGE", "tile_width", "tile_height", "overlap_x", "overlap_y",) + FUNCTION = "execute" + CATEGORY = "essentials/image manipulation" + + def execute(self, image, rows, cols, overlap, overlap_x, overlap_y): + h, w = image.shape[1:3] + tile_h = h // rows + tile_w = w // cols + h = tile_h * rows + w = tile_w * cols + overlap_h = int(tile_h * overlap) + overlap_y + overlap_w = int(tile_w * overlap) + overlap_x + + # max overlap is half of the tile size + overlap_h = min(tile_h // 2, overlap_h) + overlap_w = min(tile_w // 2, overlap_w) + + if rows == 1: + overlap_h = 0 + if cols == 1: + overlap_w = 0 + + tiles = [] + for i in range(rows): + for j in range(cols): + y1 = i * tile_h + x1 = j * tile_w + + if i > 0: + y1 -= overlap_h + if j > 0: + x1 -= overlap_w + + y2 = y1 + tile_h + overlap_h + x2 = x1 + tile_w + overlap_w + + if y2 > h: + y2 = h + y1 = y2 - tile_h - overlap_h + if x2 > w: + x2 = w + x1 = x2 - tile_w - overlap_w + + tiles.append(image[:, y1:y2, x1:x2, :]) + tiles = torch.cat(tiles, dim=0) + + return(tiles, tile_w+overlap_w, tile_h+overlap_h, overlap_w, overlap_h,) + +class ImageUntile: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "tiles": ("IMAGE",), + "overlap_x": ("INT", { "default": 0, "min": 0, "max": MAX_RESOLUTION//2, "step": 1, }), + "overlap_y": ("INT", { "default": 0, "min": 0, "max": MAX_RESOLUTION//2, "step": 1, }), + "rows": ("INT", { "default": 2, "min": 1, "max": 256, "step": 1, }), + "cols": ("INT", { "default": 2, "min": 1, "max": 256, "step": 1, }), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image manipulation" + + def execute(self, tiles, overlap_x, overlap_y, rows, cols): + tile_h, tile_w = tiles.shape[1:3] + tile_h -= overlap_y + tile_w -= overlap_x + out_w = cols * tile_w + out_h = rows * tile_h + + out = torch.zeros((1, out_h, out_w, tiles.shape[3]), device=tiles.device, dtype=tiles.dtype) + + for i in range(rows): + for j in range(cols): + y1 = i * tile_h + x1 = j * tile_w + + if i > 0: + y1 -= overlap_y + if j > 0: + x1 -= overlap_x + + y2 = y1 + tile_h + overlap_y + x2 = x1 + tile_w + overlap_x + + if y2 > out_h: + y2 = out_h + y1 = y2 - tile_h - overlap_y + if x2 > out_w: + x2 = out_w + x1 = x2 - tile_w - overlap_x + + mask = torch.ones((1, tile_h+overlap_y, tile_w+overlap_x), device=tiles.device, dtype=tiles.dtype) + + # feather the overlap on top + if i > 0 and overlap_y > 0: + mask[:, :overlap_y, :] *= torch.linspace(0, 1, overlap_y, device=tiles.device, dtype=tiles.dtype).unsqueeze(1) + # feather the overlap on bottom + #if i < rows - 1: + # mask[:, 
-overlap_y:, :] *= torch.linspace(1, 0, overlap_y, device=tiles.device, dtype=tiles.dtype).unsqueeze(1) + # feather the overlap on left + if j > 0 and overlap_x > 0: + mask[:, :, :overlap_x] *= torch.linspace(0, 1, overlap_x, device=tiles.device, dtype=tiles.dtype).unsqueeze(0) + # feather the overlap on right + #if j < cols - 1: + # mask[:, :, -overlap_x:] *= torch.linspace(1, 0, overlap_x, device=tiles.device, dtype=tiles.dtype).unsqueeze(0) + + mask = mask.unsqueeze(-1).repeat(1, 1, 1, tiles.shape[3]) + tile = tiles[i * cols + j] * mask + out[:, y1:y2, x1:x2, :] = out[:, y1:y2, x1:x2, :] * (1 - mask) + tile + return(out, ) + +class ImageSeamCarving: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "width": ("INT", { "default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1, }), + "height": ("INT", { "default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1, }), + "energy": (["backward", "forward"],), + "order": (["width-first", "height-first"],), + }, + "optional": { + "keep_mask": ("MASK",), + "drop_mask": ("MASK",), + } + } + + RETURN_TYPES = ("IMAGE",) + CATEGORY = "essentials/image manipulation" + FUNCTION = "execute" + + def execute(self, image, width, height, energy, order, keep_mask=None, drop_mask=None): + from .carve import seam_carving + + img = image.permute([0, 3, 1, 2]) + + if keep_mask is not None: + #keep_mask = keep_mask.reshape((-1, 1, keep_mask.shape[-2], keep_mask.shape[-1])).movedim(1, -1) + keep_mask = keep_mask.unsqueeze(1) + + if keep_mask.shape[2] != img.shape[2] or keep_mask.shape[3] != img.shape[3]: + keep_mask = F.interpolate(keep_mask, size=(img.shape[2], img.shape[3]), mode="bilinear") + if drop_mask is not None: + drop_mask = drop_mask.unsqueeze(1) + + if drop_mask.shape[2] != img.shape[2] or drop_mask.shape[3] != img.shape[3]: + drop_mask = F.interpolate(drop_mask, size=(img.shape[2], img.shape[3]), mode="bilinear") + + out = [] + for i in range(img.shape[0]): + resized = seam_carving( + T.ToPILImage()(img[i]), + size=(width, height), + energy_mode=energy, + order=order, + keep_mask=T.ToPILImage()(keep_mask[i]) if keep_mask is not None else None, + drop_mask=T.ToPILImage()(drop_mask[i]) if drop_mask is not None else None, + ) + out.append(T.ToTensor()(resized)) + + out = torch.stack(out).permute([0, 2, 3, 1]) + + return(out, ) + +class ImageRandomTransform: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "repeat": ("INT", { "default": 1, "min": 1, "max": 256, "step": 1, }), + "variation": ("FLOAT", { "default": 0.1, "min": 0.0, "max": 1.0, "step": 0.05, }), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image manipulation" + + def execute(self, image, seed, repeat, variation): + h, w = image.shape[1:3] + image = image.repeat(repeat, 1, 1, 1).permute([0, 3, 1, 2]) + + distortion = 0.2 * variation + rotation = 5 * variation + brightness = 0.5 * variation + contrast = 0.5 * variation + saturation = 0.5 * variation + hue = 0.2 * variation + scale = 0.5 * variation + + torch.manual_seed(seed) + + out = [] + for i in image: + tramsforms = T.Compose([ + T.RandomPerspective(distortion_scale=distortion, p=0.5), + T.RandomRotation(degrees=rotation, interpolation=T.InterpolationMode.BILINEAR, expand=True), + T.ColorJitter(brightness=brightness, contrast=contrast, saturation=saturation, hue=(-hue, hue)), + T.RandomHorizontalFlip(p=0.5), + T.RandomResizedCrop((h, w), 
scale=(1-scale, 1+scale), ratio=(w/h, w/h), interpolation=T.InterpolationMode.BICUBIC), + ]) + out.append(tramsforms(i.unsqueeze(0))) + + out = torch.cat(out, dim=0).permute([0, 2, 3, 1]).clamp(0, 1) + + return (out,) + +class RemBGSession: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": (["u2net: general purpose", "u2netp: lightweight general purpose", "u2net_human_seg: human segmentation", "u2net_cloth_seg: cloths Parsing", "silueta: very small u2net", "isnet-general-use: general purpose", "isnet-anime: anime illustrations", "sam: general purpose"],), + "providers": (['CPU', 'CUDA', 'ROCM', 'DirectML', 'OpenVINO', 'CoreML', 'Tensorrt', 'Azure'],), + }, + } + + RETURN_TYPES = ("REMBG_SESSION",) + FUNCTION = "execute" + CATEGORY = "essentials/image manipulation" + + def execute(self, model, providers): + from rembg import new_session, remove + + model = model.split(":")[0] + + class Session: + def __init__(self, model, providers): + self.session = new_session(model, providers=[providers+"ExecutionProvider"]) + def process(self, image): + return remove(image, session=self.session) + + return (Session(model, providers),) + +class TransparentBGSession: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mode": (["base", "fast", "base-nightly"],), + "use_jit": ("BOOLEAN", { "default": True }), + }, + } + + RETURN_TYPES = ("REMBG_SESSION",) + FUNCTION = "execute" + CATEGORY = "essentials/image manipulation" + + def execute(self, mode, use_jit): + from transparent_background import Remover + + class Session: + def __init__(self, mode, use_jit): + self.session = Remover(mode=mode, jit=use_jit) + def process(self, image): + return self.session.process(image) + + return (Session(mode, use_jit),) + +class ImageRemoveBackground: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "rembg_session": ("REMBG_SESSION",), + "image": ("IMAGE",), + }, + } + + RETURN_TYPES = ("IMAGE", "MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/image manipulation" + + def execute(self, rembg_session, image): + image = image.permute([0, 3, 1, 2]) + output = [] + for img in image: + img = T.ToPILImage()(img) + img = rembg_session.process(img) + output.append(T.ToTensor()(img)) + + output = torch.stack(output, dim=0) + output = output.permute([0, 2, 3, 1]) + mask = output[:, :, :, 3] if output.shape[3] == 4 else torch.ones_like(output[:, :, :, 0]) + # output = output[:, :, :, :3] + + return(output, mask,) + +""" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Image processing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +""" + +class ImageDesaturate: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "factor": ("FLOAT", { "default": 1.00, "min": 0.00, "max": 1.00, "step": 0.05, }), + "method": (["luminance (Rec.709)", "luminance (Rec.601)", "average", "lightness"],), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image processing" + + def execute(self, image, factor, method): + if method == "luminance (Rec.709)": + grayscale = 0.2126 * image[..., 0] + 0.7152 * image[..., 1] + 0.0722 * image[..., 2] + elif method == "luminance (Rec.601)": + grayscale = 0.299 * image[..., 0] + 0.587 * image[..., 1] + 0.114 * image[..., 2] + elif method == "average": + grayscale = image.mean(dim=3) + elif method == "lightness": + grayscale = (torch.max(image, dim=3)[0] + torch.min(image, dim=3)[0]) / 2 + + grayscale = (1.0 - factor) * 
image + factor * grayscale.unsqueeze(-1).repeat(1, 1, 1, 3) + grayscale = torch.clamp(grayscale, 0, 1) + + return(grayscale,) + +class PixelOEPixelize: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "downscale_mode": (["contrast", "bicubic", "nearest", "center", "k-centroid"],), + "target_size": ("INT", { "default": 128, "min": 0, "max": MAX_RESOLUTION, "step": 8 }), + "patch_size": ("INT", { "default": 16, "min": 4, "max": 32, "step": 2 }), + "thickness": ("INT", { "default": 2, "min": 1, "max": 16, "step": 1 }), + "color_matching": ("BOOLEAN", { "default": True }), + "upscale": ("BOOLEAN", { "default": True }), + #"contrast": ("FLOAT", { "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.1 }), + #"saturation": ("FLOAT", { "default": 1.0, "min": 0.0, "max": 100.0, "step": 0.1 }), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image processing" + + def execute(self, image, downscale_mode, target_size, patch_size, thickness, color_matching, upscale): + from pixeloe.pixelize import pixelize + + image = image.clone().mul(255).clamp(0, 255).byte().cpu().numpy() + output = [] + for img in image: + img = pixelize(img, + mode=downscale_mode, + target_size=target_size, + patch_size=patch_size, + thickness=thickness, + contrast=1.0, + saturation=1.0, + color_matching=color_matching, + no_upscale=not upscale) + output.append(T.ToTensor()(img)) + + output = torch.stack(output, dim=0).permute([0, 2, 3, 1]) + + return(output,) + +class ImagePosterize: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "threshold": ("FLOAT", { "default": 0.50, "min": 0.00, "max": 1.00, "step": 0.05, }), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image processing" + + def execute(self, image, threshold): + image = image.mean(dim=3, keepdim=True) + image = (image > threshold).float() + image = image.repeat(1, 1, 1, 3) + + return(image,) + +# From https://github.com/yoonsikp/pycubelut/blob/master/pycubelut.py (MIT license) +class ImageApplyLUT: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "lut_file": (folder_paths.get_filename_list("luts"),), + "gamma_correction": ("BOOLEAN", { "default": True }), + "clip_values": ("BOOLEAN", { "default": True }), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.1 }), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image processing" + + # TODO: check if we can do without numpy + def execute(self, image, lut_file, gamma_correction, clip_values, strength): + lut_file_path = folder_paths.get_full_path("luts", lut_file) + if not lut_file_path or not Path(lut_file_path).exists(): + print(f"Could not find LUT file: {lut_file_path}") + return (image,) + + from colour.io.luts.iridas_cube import read_LUT_IridasCube + + device = image.device + lut = read_LUT_IridasCube(lut_file_path) + lut.name = lut_file + + if clip_values: + if lut.domain[0].max() == lut.domain[0].min() and lut.domain[1].max() == lut.domain[1].min(): + lut.table = np.clip(lut.table, lut.domain[0, 0], lut.domain[1, 0]) + else: + if len(lut.table.shape) == 2: # 3x1D + for dim in range(3): + lut.table[:, dim] = np.clip(lut.table[:, dim], lut.domain[0, dim], lut.domain[1, dim]) + else: # 3D + for dim in range(3): + lut.table[:, :, :, dim] = np.clip(lut.table[:, :, :, dim], lut.domain[0, dim], lut.domain[1, dim]) + + out = [] + for img in image: # TODO: is this more 
resource efficient? should we use a batch instead? + lut_img = img.cpu().numpy().copy() + + is_non_default_domain = not np.array_equal(lut.domain, np.array([[0., 0., 0.], [1., 1., 1.]])) + dom_scale = None + if is_non_default_domain: + dom_scale = lut.domain[1] - lut.domain[0] + lut_img = lut_img * dom_scale + lut.domain[0] + if gamma_correction: + lut_img = lut_img ** (1/2.2) + lut_img = lut.apply(lut_img) + if gamma_correction: + lut_img = lut_img ** (2.2) + if is_non_default_domain: + lut_img = (lut_img - lut.domain[0]) / dom_scale + + lut_img = torch.from_numpy(lut_img).to(device) + if strength < 1.0: + lut_img = strength * lut_img + (1 - strength) * img + out.append(lut_img) + + out = torch.stack(out) + + return (out, ) + +# From https://github.com/Jamy-L/Pytorch-Contrast-Adaptive-Sharpening/ +class ImageCAS: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image": ("IMAGE",), + "amount": ("FLOAT", {"default": 0.8, "min": 0, "max": 1, "step": 0.05}), + }, + } + + RETURN_TYPES = ("IMAGE",) + CATEGORY = "essentials/image processing" + FUNCTION = "execute" + + def execute(self, image, amount): + epsilon = 1e-5 + img = F.pad(image.permute([0,3,1,2]), pad=(1, 1, 1, 1)) + + a = img[..., :-2, :-2] + b = img[..., :-2, 1:-1] + c = img[..., :-2, 2:] + d = img[..., 1:-1, :-2] + e = img[..., 1:-1, 1:-1] + f = img[..., 1:-1, 2:] + g = img[..., 2:, :-2] + h = img[..., 2:, 1:-1] + i = img[..., 2:, 2:] + + # Computing contrast + cross = (b, d, e, f, h) + mn = min_(cross) + mx = max_(cross) + + diag = (a, c, g, i) + mn2 = min_(diag) + mx2 = max_(diag) + mx = mx + mx2 + mn = mn + mn2 + + # Computing local weight + inv_mx = torch.reciprocal(mx + epsilon) + amp = inv_mx * torch.minimum(mn, (2 - mx)) + + # scaling + amp = torch.sqrt(amp) + w = - amp * (amount * (1/5 - 1/8) + 1/8) + div = torch.reciprocal(1 + 4*w) + + output = ((b + d + f + h)*w + e) * div + output = output.clamp(0, 1) + #output = torch.nan_to_num(output) + + output = output.permute([0,2,3,1]) + + return (output,) + +class ImageSmartSharpen: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "noise_radius": ("INT", { "default": 7, "min": 1, "max": 25, "step": 1, }), + "preserve_edges": ("FLOAT", { "default": 0.75, "min": 0.0, "max": 1.0, "step": 0.05 }), + "sharpen": ("FLOAT", { "default": 5.0, "min": 0.0, "max": 25.0, "step": 0.5 }), + "ratio": ("FLOAT", { "default": 0.5, "min": 0.0, "max": 1.0, "step": 0.1 }), + }} + + RETURN_TYPES = ("IMAGE",) + CATEGORY = "essentials/image processing" + FUNCTION = "execute" + + def execute(self, image, noise_radius, preserve_edges, sharpen, ratio): + import cv2 + + output = [] + #diagonal = np.sqrt(image.shape[1]**2 + image.shape[2]**2) + if preserve_edges > 0: + preserve_edges = max(1 - preserve_edges, 0.05) + + for img in image: + if noise_radius > 1: + sigma = 0.3 * ((noise_radius - 1) * 0.5 - 1) + 0.8 # this is what pytorch uses for blur + #sigma_color = preserve_edges * (diagonal / 2048) + blurred = cv2.bilateralFilter(img.cpu().numpy(), noise_radius, preserve_edges, sigma) + blurred = torch.from_numpy(blurred) + else: + blurred = img + + if sharpen > 0: + sharpened = kornia.enhance.sharpness(img.permute(2,0,1), sharpen).permute(1,2,0) + else: + sharpened = img + + img = ratio * sharpened + (1 - ratio) * blurred + img = torch.clamp(img, 0, 1) + output.append(img) + + del blurred, sharpened + output = torch.stack(output) + + return (output,) + + +class ExtractKeyframes: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + 
"image": ("IMAGE",), + "threshold": ("FLOAT", { "default": 0.85, "min": 0.00, "max": 1.00, "step": 0.01, }), + } + } + + RETURN_TYPES = ("IMAGE", "STRING") + RETURN_NAMES = ("KEYFRAMES", "indexes") + + FUNCTION = "execute" + CATEGORY = "essentials" + + def execute(self, image, threshold): + window_size = 2 + + variations = torch.sum(torch.abs(image[1:] - image[:-1]), dim=[1, 2, 3]) + #variations = torch.sum((image[1:] - image[:-1]) ** 2, dim=[1, 2, 3]) + threshold = torch.quantile(variations.float(), threshold).item() + + keyframes = [] + for i in range(image.shape[0] - window_size + 1): + window = image[i:i + window_size] + variation = torch.sum(torch.abs(window[-1] - window[0])).item() + + if variation > threshold: + keyframes.append(i + window_size - 1) + + return (image[keyframes], ','.join(map(str, keyframes)),) + +class ImageColorMatch: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "reference": ("IMAGE",), + "color_space": (["LAB", "YCbCr", "RGB", "LUV", "YUV", "XYZ"],), + "factor": ("FLOAT", { "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.05, }), + "device": (["auto", "cpu", "gpu"],), + "batch_size": ("INT", { "default": 0, "min": 0, "max": 1024, "step": 1, }), + }, + "optional": { + "reference_mask": ("MASK",), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image processing" + + def execute(self, image, reference, color_space, factor, device, batch_size, reference_mask=None): + if "gpu" == device: + device = comfy.model_management.get_torch_device() + elif "auto" == device: + device = comfy.model_management.intermediate_device() + else: + device = 'cpu' + + image = image.permute([0, 3, 1, 2]) + reference = reference.permute([0, 3, 1, 2]).to(device) + + # Ensure reference_mask is in the correct format and on the right device + if reference_mask is not None: + assert reference_mask.ndim == 3, f"Expected reference_mask to have 3 dimensions, but got {reference_mask.ndim}" + assert reference_mask.shape[0] == reference.shape[0], f"Frame count mismatch: reference_mask has {reference_mask.shape[0]} frames, but reference has {reference.shape[0]}" + + # Reshape mask to (batch, 1, height, width) + reference_mask = reference_mask.unsqueeze(1).to(device) + + # Ensure the mask is binary (0 or 1) + reference_mask = (reference_mask > 0.5).float() + + # Ensure spatial dimensions match + if reference_mask.shape[2:] != reference.shape[2:]: + reference_mask = comfy.utils.common_upscale( + reference_mask, + reference.shape[3], reference.shape[2], + upscale_method='bicubic', + crop='center' + ) + + if batch_size == 0 or batch_size > image.shape[0]: + batch_size = image.shape[0] + + if "LAB" == color_space: + reference = kornia.color.rgb_to_lab(reference) + elif "YCbCr" == color_space: + reference = kornia.color.rgb_to_ycbcr(reference) + elif "LUV" == color_space: + reference = kornia.color.rgb_to_luv(reference) + elif "YUV" == color_space: + reference = kornia.color.rgb_to_yuv(reference) + elif "XYZ" == color_space: + reference = kornia.color.rgb_to_xyz(reference) + + reference_mean, reference_std = self.compute_mean_std(reference, reference_mask) + + image_batch = torch.split(image, batch_size, dim=0) + output = [] + + for image in image_batch: + image = image.to(device) + + if color_space == "LAB": + image = kornia.color.rgb_to_lab(image) + elif color_space == "YCbCr": + image = kornia.color.rgb_to_ycbcr(image) + elif color_space == "LUV": + image = kornia.color.rgb_to_luv(image) + elif color_space == "YUV": + image 
= kornia.color.rgb_to_yuv(image) + elif color_space == "XYZ": + image = kornia.color.rgb_to_xyz(image) + + image_mean, image_std = self.compute_mean_std(image) + + matched = torch.nan_to_num((image - image_mean) / image_std) * torch.nan_to_num(reference_std) + reference_mean + matched = factor * matched + (1 - factor) * image + + if color_space == "LAB": + matched = kornia.color.lab_to_rgb(matched) + elif color_space == "YCbCr": + matched = kornia.color.ycbcr_to_rgb(matched) + elif color_space == "LUV": + matched = kornia.color.luv_to_rgb(matched) + elif color_space == "YUV": + matched = kornia.color.yuv_to_rgb(matched) + elif color_space == "XYZ": + matched = kornia.color.xyz_to_rgb(matched) + + out = matched.permute([0, 2, 3, 1]).clamp(0, 1).to(comfy.model_management.intermediate_device()) + output.append(out) + + out = None + output = torch.cat(output, dim=0) + return (output,) + + def compute_mean_std(self, tensor, mask=None): + if mask is not None: + # Apply mask to the tensor + masked_tensor = tensor * mask + + # Calculate the sum of the mask for each channel + mask_sum = mask.sum(dim=[2, 3], keepdim=True) + + # Avoid division by zero + mask_sum = torch.clamp(mask_sum, min=1e-6) + + # Calculate mean and std only for masked area + mean = torch.nan_to_num(masked_tensor.sum(dim=[2, 3], keepdim=True) / mask_sum) + std = torch.sqrt(torch.nan_to_num(((masked_tensor - mean) ** 2 * mask).sum(dim=[2, 3], keepdim=True) / mask_sum)) + else: + mean = tensor.mean(dim=[2, 3], keepdim=True) + std = tensor.std(dim=[2, 3], keepdim=True) + return mean, std + +class ImageColorMatchAdobe(ImageColorMatch): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "reference": ("IMAGE",), + "color_space": (["RGB", "LAB"],), + "luminance_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.05}), + "color_intensity_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 2.0, "step": 0.05}), + "fade_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.05}), + "neutralization_factor": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.05}), + "device": (["auto", "cpu", "gpu"],), + }, + "optional": { + "reference_mask": ("MASK",), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image processing" + + def analyze_color_statistics(self, image, mask=None): + # Assuming image is in RGB format + l, a, b = kornia.color.rgb_to_lab(image).chunk(3, dim=1) + + if mask is not None: + # Ensure mask is binary and has the same spatial dimensions as the image + mask = F.interpolate(mask, size=image.shape[2:], mode='nearest') + mask = (mask > 0.5).float() + + # Apply mask to each channel + l = l * mask + a = a * mask + b = b * mask + + # Compute masked mean and std + num_pixels = mask.sum() + mean_l = (l * mask).sum() / num_pixels + mean_a = (a * mask).sum() / num_pixels + mean_b = (b * mask).sum() / num_pixels + std_l = torch.sqrt(((l - mean_l)**2 * mask).sum() / num_pixels) + var_ab = ((a - mean_a)**2 + (b - mean_b)**2) * mask + std_ab = torch.sqrt(var_ab.sum() / num_pixels) + else: + mean_l = l.mean() + std_l = l.std() + mean_a = a.mean() + mean_b = b.mean() + std_ab = torch.sqrt(a.var() + b.var()) + + return mean_l, std_l, mean_a, mean_b, std_ab + + def apply_color_transformation(self, image, source_stats, dest_stats, L, C, N): + l, a, b = kornia.color.rgb_to_lab(image).chunk(3, dim=1) + + # Unpack statistics + src_mean_l, src_std_l, src_mean_a, src_mean_b, src_std_ab = source_stats + dest_mean_l, dest_std_l, 
dest_mean_a, dest_mean_b, dest_std_ab = dest_stats + + # Adjust luminance + l_new = (l - dest_mean_l) * (src_std_l / dest_std_l) * L + src_mean_l + + # Neutralize color cast + a = a - N * dest_mean_a + b = b - N * dest_mean_b + + # Adjust color intensity + a_new = a * (src_std_ab / dest_std_ab) * C + b_new = b * (src_std_ab / dest_std_ab) * C + + # Combine channels + lab_new = torch.cat([l_new, a_new, b_new], dim=1) + + # Convert back to RGB + rgb_new = kornia.color.lab_to_rgb(lab_new) + + return rgb_new + + def execute(self, image, reference, color_space, luminance_factor, color_intensity_factor, fade_factor, neutralization_factor, device, reference_mask=None): + if "gpu" == device: + device = comfy.model_management.get_torch_device() + elif "auto" == device: + device = comfy.model_management.intermediate_device() + else: + device = 'cpu' + + # Ensure image and reference are in the correct shape (B, C, H, W) + image = image.permute(0, 3, 1, 2).to(device) + reference = reference.permute(0, 3, 1, 2).to(device) + + # Handle reference_mask (if provided) + if reference_mask is not None: + # Ensure reference_mask is 4D (B, 1, H, W) + if reference_mask.ndim == 2: + reference_mask = reference_mask.unsqueeze(0).unsqueeze(0) + elif reference_mask.ndim == 3: + reference_mask = reference_mask.unsqueeze(1) + reference_mask = reference_mask.to(device) + + # Analyze color statistics + source_stats = self.analyze_color_statistics(reference, reference_mask) + dest_stats = self.analyze_color_statistics(image) + + # Apply color transformation + transformed = self.apply_color_transformation( + image, source_stats, dest_stats, + luminance_factor, color_intensity_factor, neutralization_factor + ) + + # Apply fade factor + result = fade_factor * transformed + (1 - fade_factor) * image + + # Convert back to (B, H, W, C) format and ensure values are in [0, 1] range + result = result.permute(0, 2, 3, 1).clamp(0, 1).to(comfy.model_management.intermediate_device()) + + return (result,) + + +class ImageHistogramMatch: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "reference": ("IMAGE",), + "method": (["pytorch", "skimage"],), + "factor": ("FLOAT", { "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.05, }), + "device": (["auto", "cpu", "gpu"],), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image processing" + + def execute(self, image, reference, method, factor, device): + if "gpu" == device: + device = comfy.model_management.get_torch_device() + elif "auto" == device: + device = comfy.model_management.intermediate_device() + else: + device = 'cpu' + + if "pytorch" in method: + from .histogram_matching import Histogram_Matching + + image = image.permute([0, 3, 1, 2]).to(device) + reference = reference.permute([0, 3, 1, 2]).to(device)[0].unsqueeze(0) + image.requires_grad = True + reference.requires_grad = True + + out = [] + + for i in image: + i = i.unsqueeze(0) + hm = Histogram_Matching(differentiable=True) + out.append(hm(i, reference)) + out = torch.cat(out, dim=0) + out = factor * out + (1 - factor) * image + out = out.permute([0, 2, 3, 1]).clamp(0, 1) + else: + from skimage.exposure import match_histograms + + out = torch.from_numpy(match_histograms(image.cpu().numpy(), reference.cpu().numpy(), channel_axis=3)).to(device) + out = factor * out + (1 - factor) * image.to(device) + + return (out.to(comfy.model_management.intermediate_device()),) + +""" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + 
Utilities +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +""" + +class ImageToDevice: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "device": (["auto", "cpu", "gpu"],), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image utils" + + def execute(self, image, device): + if "gpu" == device: + device = comfy.model_management.get_torch_device() + elif "auto" == device: + device = comfy.model_management.intermediate_device() + else: + device = 'cpu' + + image = image.clone().to(device) + torch.cuda.empty_cache() + + return (image,) + +class GetImageSize: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + } + } + + RETURN_TYPES = ("INT", "INT", "INT",) + RETURN_NAMES = ("width", "height", "count") + FUNCTION = "execute" + CATEGORY = "essentials/image utils" + + def execute(self, image): + return (image.shape[2], image.shape[1], image.shape[0]) + +class ImageRemoveAlpha: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image utils" + + def execute(self, image): + if image.shape[3] == 4: + image = image[..., :3] + return (image,) + +class ImagePreviewFromLatent(SaveImage): + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) + self.compress_level = 1 + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "latent": ("LATENT",), + "vae": ("VAE", ), + "tile_size": ("INT", {"default": 0, "min": 0, "max": 4096, "step": 64}) + }, "optional": { + "image": (["none"], {"image_upload": False}), + }, "hidden": { + "prompt": "PROMPT", + "extra_pnginfo": "EXTRA_PNGINFO", + }, + } + + RETURN_TYPES = ("IMAGE", "MASK", "INT", "INT",) + RETURN_NAMES = ("IMAGE", "MASK", "width", "height",) + FUNCTION = "execute" + CATEGORY = "essentials/image utils" + + def execute(self, latent, vae, tile_size, prompt=None, extra_pnginfo=None, image=None, filename_prefix="ComfyUI"): + mask = torch.zeros((64,64), dtype=torch.float32, device="cpu") + ui = None + + if image.startswith("clipspace"): + image_path = folder_paths.get_annotated_filepath(image) + if not os.path.exists(image_path): + raise ValueError(f"Clipspace image does not exist anymore, select 'none' in the image field.") + + img = pillow(Image.open, image_path) + img = pillow(ImageOps.exif_transpose, img) + if img.mode == "I": + img = img.point(lambda i: i * (1 / 255)) + image = img.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + if "A" in img.getbands(): + mask = np.array(img.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. 
- torch.from_numpy(mask) + ui = { + "filename": os.path.basename(image_path), + "subfolder": os.path.dirname(image_path), + "type": "temp", + } + else: + if tile_size > 0: + tile_size = max(tile_size, 320) + image = vae.decode_tiled(latent["samples"], tile_x=tile_size // 8, tile_y=tile_size // 8, ) + else: + image = vae.decode(latent["samples"]) + ui = self.save_images(image, filename_prefix, prompt, extra_pnginfo) + + out = {**ui, "result": (image, mask, image.shape[2], image.shape[1],)} + return out + +class NoiseFromImage: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "noise_strenght": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01 }), + "noise_size": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01 }), + "color_noise": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.01 }), + "mask_strength": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01 }), + "mask_scale_diff": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01 }), + "mask_contrast": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.1 }), + "saturation": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 100.0, "step": 0.1 }), + "contrast": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.1 }), + "blur": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.1 }), + }, + "optional": { + "noise_mask": ("IMAGE",), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "execute" + CATEGORY = "essentials/image utils" + + def execute(self, image, noise_size, color_noise, mask_strength, mask_scale_diff, mask_contrast, noise_strenght, saturation, contrast, blur, noise_mask=None): + torch.manual_seed(0) + + elastic_alpha = max(image.shape[1], image.shape[2])# * noise_size + elastic_sigma = elastic_alpha / 400 * noise_size + + blur_size = int(6 * blur+1) + if blur_size % 2 == 0: + blur_size+= 1 + + if noise_mask is None: + noise_mask = image + + # increase contrast of the mask + if mask_contrast != 1: + noise_mask = T.ColorJitter(contrast=(mask_contrast,mask_contrast))(noise_mask.permute([0, 3, 1, 2])).permute([0, 2, 3, 1]) + + # Ensure noise mask is the same size as the image + if noise_mask.shape[1:] != image.shape[1:]: + noise_mask = F.interpolate(noise_mask.permute([0, 3, 1, 2]), size=(image.shape[1], image.shape[2]), mode='bicubic', align_corners=False) + noise_mask = noise_mask.permute([0, 2, 3, 1]) + # Ensure we have the same number of masks and images + if noise_mask.shape[0] > image.shape[0]: + noise_mask = noise_mask[:image.shape[0]] + else: + noise_mask = torch.cat((noise_mask, noise_mask[-1:].repeat((image.shape[0]-noise_mask.shape[0], 1, 1, 1))), dim=0) + + # Convert mask to grayscale mask + noise_mask = noise_mask.mean(dim=3).unsqueeze(-1) + + # add color noise + imgs = image.clone().permute([0, 3, 1, 2]) + if color_noise > 0: + color_noise = torch.normal(torch.zeros_like(imgs), std=color_noise) + color_noise *= (imgs - imgs.min()) / (imgs.max() - imgs.min()) + + imgs = imgs + color_noise + imgs = imgs.clamp(0, 1) + + # create fine and coarse noise + fine_noise = [] + for n in imgs: + avg_color = n.mean(dim=[1,2]) + + tmp_noise = T.ElasticTransform(alpha=elastic_alpha, sigma=elastic_sigma, fill=avg_color.tolist())(n) + if blur > 0: + tmp_noise = T.GaussianBlur(blur_size, blur)(tmp_noise) + tmp_noise = T.ColorJitter(contrast=(contrast,contrast), saturation=(saturation,saturation))(tmp_noise) + fine_noise.append(tmp_noise) + + imgs = None + del imgs + + fine_noise = torch.stack(fine_noise, 
dim=0) + fine_noise = fine_noise.permute([0, 2, 3, 1]) + #fine_noise = torch.stack(fine_noise, dim=0) + #fine_noise = pb(fine_noise) + mask_scale_diff = min(mask_scale_diff, 0.99) + if mask_scale_diff > 0: + coarse_noise = F.interpolate(fine_noise.permute([0, 3, 1, 2]), scale_factor=1-mask_scale_diff, mode='area') + coarse_noise = F.interpolate(coarse_noise, size=(fine_noise.shape[1], fine_noise.shape[2]), mode='bilinear', align_corners=False) + coarse_noise = coarse_noise.permute([0, 2, 3, 1]) + else: + coarse_noise = fine_noise + + output = (1 - noise_mask) * coarse_noise + noise_mask * fine_noise + + if mask_strength < 1: + noise_mask = noise_mask.pow(mask_strength) + noise_mask = torch.nan_to_num(noise_mask).clamp(0, 1) + output = noise_mask * output + (1 - noise_mask) * image + + # apply noise to image + output = output * noise_strenght + image * (1 - noise_strenght) + output = output.clamp(0, 1) + + return (output, ) + +IMAGE_CLASS_MAPPINGS = { + # Image analysis + "ImageEnhanceDifference+": ImageEnhanceDifference, + + # Image batch + "ImageBatchMultiple+": ImageBatchMultiple, + "ImageExpandBatch+": ImageExpandBatch, + "ImageFromBatch+": ImageFromBatch, + "ImageListToBatch+": ImageListToBatch, + "ImageBatchToList+": ImageBatchToList, + + # Image manipulation + "ImageCompositeFromMaskBatch+": ImageCompositeFromMaskBatch, + "ImageComposite+": ImageComposite, + "ImageCrop+": ImageCrop, + "ImageFlip+": ImageFlip, + "ImageRandomTransform+": ImageRandomTransform, + "ImageRemoveAlpha+": ImageRemoveAlpha, + "ImageRemoveBackground+": ImageRemoveBackground, + "ImageResize+": ImageResize, + "ImageSeamCarving+": ImageSeamCarving, + "ImageTile+": ImageTile, + "ImageUntile+": ImageUntile, + "RemBGSession+": RemBGSession, + "TransparentBGSession+": TransparentBGSession, + + # Image processing + "ImageApplyLUT+": ImageApplyLUT, + "ImageCASharpening+": ImageCAS, + "ImageDesaturate+": ImageDesaturate, + "PixelOEPixelize+": PixelOEPixelize, + "ImagePosterize+": ImagePosterize, + "ImageColorMatch+": ImageColorMatch, + "ImageColorMatchAdobe+": ImageColorMatchAdobe, + "ImageHistogramMatch+": ImageHistogramMatch, + "ImageSmartSharpen+": ImageSmartSharpen, + + # Utilities + "GetImageSize+": GetImageSize, + "ImageToDevice+": ImageToDevice, + "ImagePreviewFromLatent+": ImagePreviewFromLatent, + "NoiseFromImage+": NoiseFromImage, + #"ExtractKeyframes+": ExtractKeyframes, +} + +IMAGE_NAME_MAPPINGS = { + # Image analysis + "ImageEnhanceDifference+": "πŸ”§ Image Enhance Difference", + + # Image batch + "ImageBatchMultiple+": "πŸ”§ Images Batch Multiple", + "ImageExpandBatch+": "πŸ”§ Image Expand Batch", + "ImageFromBatch+": "πŸ”§ Image From Batch", + "ImageListToBatch+": "πŸ”§ Image List To Batch", + "ImageBatchToList+": "πŸ”§ Image Batch To List", + + # Image manipulation + "ImageCompositeFromMaskBatch+": "πŸ”§ Image Composite From Mask Batch", + "ImageComposite+": "πŸ”§ Image Composite", + "ImageCrop+": "πŸ”§ Image Crop", + "ImageFlip+": "πŸ”§ Image Flip", + "ImageRandomTransform+": "πŸ”§ Image Random Transform", + "ImageRemoveAlpha+": "πŸ”§ Image Remove Alpha", + "ImageRemoveBackground+": "πŸ”§ Image Remove Background", + "ImageResize+": "πŸ”§ Image Resize", + "ImageSeamCarving+": "πŸ”§ Image Seam Carving", + "ImageTile+": "πŸ”§ Image Tile", + "ImageUntile+": "πŸ”§ Image Untile", + "RemBGSession+": "πŸ”§ RemBG Session", + "TransparentBGSession+": "πŸ”§ InSPyReNet TransparentBG", + + # Image processing + "ImageApplyLUT+": "πŸ”§ Image Apply LUT", + "ImageCASharpening+": "πŸ”§ Image Contrast Adaptive Sharpening", 
+ "ImageDesaturate+": "πŸ”§ Image Desaturate", + "PixelOEPixelize+": "πŸ”§ Pixelize", + "ImagePosterize+": "πŸ”§ Image Posterize", + "ImageColorMatch+": "πŸ”§ Image Color Match", + "ImageColorMatchAdobe+": "πŸ”§ Image Color Match Adobe", + "ImageHistogramMatch+": "πŸ”§ Image Histogram Match", + "ImageSmartSharpen+": "πŸ”§ Image Smart Sharpen", + + # Utilities + "GetImageSize+": "πŸ”§ Get Image Size", + "ImageToDevice+": "πŸ”§ Image To Device", + "ImagePreviewFromLatent+": "πŸ”§ Image Preview From Latent", + "NoiseFromImage+": "πŸ”§ Noise From Image", +} diff --git a/custom_nodes/ComfyUI-essentials-main/js/DisplayAny.js b/custom_nodes/ComfyUI-essentials-main/js/DisplayAny.js new file mode 100644 index 0000000000000000000000000000000000000000..ccfa68c138d8fdc87e57132aed8a6a74f94c2dd1 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/js/DisplayAny.js @@ -0,0 +1,36 @@ +import { app } from "../../scripts/app.js"; +import { ComfyWidgets } from "../../scripts/widgets.js"; + +app.registerExtension({ + name: "essentials.DisplayAny", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (!nodeData?.category?.startsWith("essentials")) { + return; + } + + if (nodeData.name === "DisplayAny") { + const onExecuted = nodeType.prototype.onExecuted; + + nodeType.prototype.onExecuted = function (message) { + onExecuted?.apply(this, arguments); + + if (this.widgets) { + for (let i = 1; i < this.widgets.length; i++) { + this.widgets[i].onRemove?.(); + } + this.widgets.length = 1; + } + + // Check if the "text" widget already exists. + let textWidget = this.widgets && this.widgets.find(w => w.name === "displaytext"); + if (!textWidget) { + textWidget = ComfyWidgets["STRING"](this, "displaytext", ["STRING", { multiline: true }], app).widget; + textWidget.inputEl.readOnly = true; + textWidget.inputEl.style.border = "none"; + textWidget.inputEl.style.backgroundColor = "transparent"; + } + textWidget.value = message["text"].join(""); + }; + } + }, +}); \ No newline at end of file diff --git a/custom_nodes/ComfyUI-essentials-main/js/FluxAttentionSeeker.js b/custom_nodes/ComfyUI-essentials-main/js/FluxAttentionSeeker.js new file mode 100644 index 0000000000000000000000000000000000000000..b31989c51ebcabd0b87ffd0b9f71f1a6ccb702a7 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/js/FluxAttentionSeeker.js @@ -0,0 +1,133 @@ +import { app } from "../../scripts/app.js"; + +app.registerExtension({ + name: "essentials.FluxAttentionSeeker", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (!nodeData?.category?.startsWith("essentials")) { + return; + } + + if (nodeData.name === "FluxAttentionSeeker+") { + const onCreated = nodeType.prototype.onNodeCreated; + + nodeType.prototype.onNodeCreated = function () { + this.addWidget("button", "RESET ALL", null, () => { + this.widgets.forEach(w => { + if (w.type === "slider") { + w.value = 1.0; + } + }); + }); + + this.addWidget("button", "ZERO ALL", null, () => { + this.widgets.forEach(w => { + if (w.type === "slider") { + w.value = 0.0; + } + }); + }); + + this.addWidget("button", "REPEAT FIRST", null, () => { + var clip_value = undefined; + var t5_value = undefined; + this.widgets.forEach(w => { + if (w.name.startsWith('clip_l')) { + if (clip_value === undefined) { + clip_value = w.value; + } + w.value = clip_value; + } else if (w.name.startsWith('t5')) { + if (t5_value === undefined) { + t5_value = w.value; + } + w.value = t5_value; + } + }); + }); + }; + } + }, +}); + +app.registerExtension({ + name: "essentials.SD3AttentionSeekerLG", + async 
beforeRegisterNodeDef(nodeType, nodeData, app) { + if (!nodeData?.category?.startsWith("essentials")) { + return; + } + + if (nodeData.name === "SD3AttentionSeekerLG+") { + const onCreated = nodeType.prototype.onNodeCreated; + + nodeType.prototype.onNodeCreated = function () { + this.addWidget("button", "RESET L", null, () => { + this.widgets.forEach(w => { + if (w.type === "slider" && w.name.startsWith('clip_l')) { + w.value = 1.0; + } + }); + }); + this.addWidget("button", "RESET G", null, () => { + this.widgets.forEach(w => { + if (w.type === "slider" && w.name.startsWith('clip_g')) { + w.value = 1.0; + } + }); + }); + + this.addWidget("button", "REPEAT FIRST", null, () => { + var clip_l_value = undefined; + var clip_g_value = undefined; + this.widgets.forEach(w => { + if (w.name.startsWith('clip_l')) { + if (clip_l_value === undefined) { + clip_l_value = w.value; + } + w.value = clip_l_value; + } else if (w.name.startsWith('clip_g')) { + if (clip_g_value === undefined) { + clip_g_value = w.value; + } + w.value = clip_g_value; + } + }); + }); + }; + } + }, +}); + +app.registerExtension({ + name: "essentials.SD3AttentionSeekerT5", + async beforeRegisterNodeDef(nodeType, nodeData, app) { + if (!nodeData?.category?.startsWith("essentials")) { + return; + } + + if (nodeData.name === "SD3AttentionSeekerT5+") { + const onCreated = nodeType.prototype.onNodeCreated; + + nodeType.prototype.onNodeCreated = function () { + this.addWidget("button", "RESET ALL", null, () => { + this.widgets.forEach(w => { + if (w.type === "slider") { + w.value = 1.0; + } + }); + }); + + this.addWidget("button", "REPEAT FIRST", null, () => { + var t5_value = undefined; + this.widgets.forEach(w => { + if (w.name.startsWith('t5')) { + if (t5_value === undefined) { + t5_value = w.value; + } + w.value = t5_value; + } + }); + }); + }; + } + }, +}); \ No newline at end of file diff --git a/custom_nodes/ComfyUI-essentials-main/luts/put_luts_files_here.txt b/custom_nodes/ComfyUI-essentials-main/luts/put_luts_files_here.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/custom_nodes/ComfyUI-essentials-main/mask.py b/custom_nodes/ComfyUI-essentials-main/mask.py new file mode 100644 index 0000000000000000000000000000000000000000..dcc85a27a97013123f73bb80d6f5b0e19399d8e4 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/mask.py @@ -0,0 +1,596 @@ +from nodes import SaveImage +import torch +import torchvision.transforms.v2 as T +import random +import folder_paths +import comfy.utils +from .image import ImageExpandBatch +from .utils import AnyType +import numpy as np +import scipy +from PIL import Image +from nodes import MAX_RESOLUTION +import math + +any = AnyType("*") + +class MaskBlur: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "amount": ("INT", { "default": 6, "min": 0, "max": 256, "step": 1, }), + "device": (["auto", "cpu", "gpu"],), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/mask" + + def execute(self, mask, amount, device): + if amount == 0: + return (mask,) + + if "gpu" == device: + mask = mask.to(comfy.model_management.get_torch_device()) + elif "cpu" == device: + mask = mask.to('cpu') + + if amount % 2 == 0: + amount+= 1 + + if mask.dim() == 2: + mask = mask.unsqueeze(0) + + mask = T.functional.gaussian_blur(mask.unsqueeze(1), amount).squeeze(1) + + if "gpu" == device or "cpu" == device: + mask = mask.to(comfy.model_management.intermediate_device()) + + 
return(mask,) + +class MaskFlip: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "axis": (["x", "y", "xy"],), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/mask" + + def execute(self, mask, axis): + if mask.dim() == 2: + mask = mask.unsqueeze(0) + + dim = () + if "y" in axis: + dim += (1,) + if "x" in axis: + dim += (2,) + mask = torch.flip(mask, dims=dim) + + return(mask,) + +class MaskPreview(SaveImage): + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) + self.compress_level = 4 + + @classmethod + def INPUT_TYPES(s): + return { + "required": {"mask": ("MASK",), }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + FUNCTION = "execute" + CATEGORY = "essentials/mask" + + def execute(self, mask, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): + preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) + return self.save_images(preview, filename_prefix, prompt, extra_pnginfo) + +class MaskBatch: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask1": ("MASK",), + "mask2": ("MASK",), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/mask batch" + + def execute(self, mask1, mask2): + if mask1.shape[1:] != mask2.shape[1:]: + mask2 = comfy.utils.common_upscale(mask2.unsqueeze(1).expand(-1,3,-1,-1), mask1.shape[2], mask1.shape[1], upscale_method='bicubic', crop='center')[:,0,:,:] + + return (torch.cat((mask1, mask2), dim=0),) + +class MaskExpandBatch: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "size": ("INT", { "default": 16, "min": 1, "step": 1, }), + "method": (["expand", "repeat all", "repeat first", "repeat last"],) + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/mask batch" + + def execute(self, mask, size, method): + return (ImageExpandBatch().execute(mask.unsqueeze(1).expand(-1,3,-1,-1), size, method)[0][:,0,:,:],) + + +class MaskBoundingBox: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "padding": ("INT", { "default": 0, "min": 0, "max": 4096, "step": 1, }), + "blur": ("INT", { "default": 0, "min": 0, "max": 256, "step": 1, }), + }, + "optional": { + "image_optional": ("IMAGE",), + } + } + + RETURN_TYPES = ("MASK", "IMAGE", "INT", "INT", "INT", "INT") + RETURN_NAMES = ("MASK", "IMAGE", "x", "y", "width", "height") + FUNCTION = "execute" + CATEGORY = "essentials/mask" + + def execute(self, mask, padding, blur, image_optional=None): + if mask.dim() == 2: + mask = mask.unsqueeze(0) + + if image_optional is None: + image_optional = mask.unsqueeze(3).repeat(1, 1, 1, 3) + + # resize the image if it's not the same size as the mask + if image_optional.shape[1:] != mask.shape[1:]: + image_optional = comfy.utils.common_upscale(image_optional.permute([0,3,1,2]), mask.shape[2], mask.shape[1], upscale_method='bicubic', crop='center').permute([0,2,3,1]) + + # match batch size + if image_optional.shape[0] < mask.shape[0]: + image_optional = torch.cat((image_optional, image_optional[-1].unsqueeze(0).repeat(mask.shape[0]-image_optional.shape[0], 1, 1, 1)), dim=0) + elif image_optional.shape[0] > mask.shape[0]: + image_optional = image_optional[:mask.shape[0]] + + # blur the mask + if blur > 0: + if blur % 2 == 0: + blur += 1 + 
mask = T.functional.gaussian_blur(mask.unsqueeze(1), blur).squeeze(1) + + _, y, x = torch.where(mask) + x1 = max(0, x.min().item() - padding) + x2 = min(mask.shape[2], x.max().item() + 1 + padding) + y1 = max(0, y.min().item() - padding) + y2 = min(mask.shape[1], y.max().item() + 1 + padding) + + # crop the mask + mask = mask[:, y1:y2, x1:x2] + image_optional = image_optional[:, y1:y2, x1:x2, :] + + return (mask, image_optional, x1, y1, x2 - x1, y2 - y1) + + +class MaskFromColor: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE", ), + "red": ("INT", { "default": 255, "min": 0, "max": 255, "step": 1, }), + "green": ("INT", { "default": 255, "min": 0, "max": 255, "step": 1, }), + "blue": ("INT", { "default": 255, "min": 0, "max": 255, "step": 1, }), + "threshold": ("INT", { "default": 0, "min": 0, "max": 127, "step": 1, }), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/mask" + + def execute(self, image, red, green, blue, threshold): + temp = (torch.clamp(image, 0, 1.0) * 255.0).round().to(torch.int) + color = torch.tensor([red, green, blue]) + lower_bound = (color - threshold).clamp(min=0) + upper_bound = (color + threshold).clamp(max=255) + lower_bound = lower_bound.view(1, 1, 1, 3) + upper_bound = upper_bound.view(1, 1, 1, 3) + mask = (temp >= lower_bound) & (temp <= upper_bound) + mask = mask.all(dim=-1) + mask = mask.float() + + return (mask, ) + + +class MaskFromSegmentation: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE", ), + "segments": ("INT", { "default": 6, "min": 1, "max": 16, "step": 1, }), + "remove_isolated_pixels": ("INT", { "default": 0, "min": 0, "max": 32, "step": 1, }), + "remove_small_masks": ("FLOAT", { "default": 0.0, "min": 0., "max": 1., "step": 0.01, }), + "fill_holes": ("BOOLEAN", { "default": False }), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/mask" + + def execute(self, image, segments, remove_isolated_pixels, fill_holes, remove_small_masks): + im = image[0] # we only work on the first image in the batch + im = Image.fromarray((im * 255).to(torch.uint8).cpu().numpy(), mode="RGB") + im = im.quantize(palette=im.quantize(colors=segments), dither=Image.Dither.NONE) + im = torch.tensor(np.array(im.convert("RGB"))).float() / 255.0 + + colors = im.reshape(-1, im.shape[-1]) + colors = torch.unique(colors, dim=0) + + masks = [] + for color in colors: + mask = (im == color).all(dim=-1).float() + # remove isolated pixels + if remove_isolated_pixels > 0: + mask = torch.from_numpy(scipy.ndimage.binary_opening(mask.cpu().numpy(), structure=np.ones((remove_isolated_pixels, remove_isolated_pixels)))) + + # fill holes + if fill_holes: + mask = torch.from_numpy(scipy.ndimage.binary_fill_holes(mask.cpu().numpy())) + + # if the mask is too small, it's probably noise + if mask.sum() / (mask.shape[0]*mask.shape[1]) > remove_small_masks: + masks.append(mask) + + if masks == []: + masks.append(torch.zeros_like(im)[:,:,0]) # return an empty mask if no masks were found, prevents errors + + mask = torch.stack(masks, dim=0).float() + + return (mask, ) + + +class MaskFix: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "erode_dilate": ("INT", { "default": 0, "min": -256, "max": 256, "step": 1, }), + "fill_holes": ("INT", { "default": 0, "min": 0, "max": 128, "step": 1, }), + "remove_isolated_pixels": ("INT", { "default": 0, "min": 0, "max": 32, "step": 1, }), + "smooth": ("INT", { "default": 0, "min": 
0, "max": 256, "step": 1, }), + "blur": ("INT", { "default": 0, "min": 0, "max": 256, "step": 1, }), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/mask" + + def execute(self, mask, erode_dilate, smooth, remove_isolated_pixels, blur, fill_holes): + masks = [] + for m in mask: + # erode and dilate + if erode_dilate != 0: + if erode_dilate < 0: + m = torch.from_numpy(scipy.ndimage.grey_erosion(m.cpu().numpy(), size=(-erode_dilate, -erode_dilate))) + else: + m = torch.from_numpy(scipy.ndimage.grey_dilation(m.cpu().numpy(), size=(erode_dilate, erode_dilate))) + + # fill holes + if fill_holes > 0: + #m = torch.from_numpy(scipy.ndimage.binary_fill_holes(m.cpu().numpy(), structure=np.ones((fill_holes,fill_holes)))).float() + m = torch.from_numpy(scipy.ndimage.grey_closing(m.cpu().numpy(), size=(fill_holes, fill_holes))) + + # remove isolated pixels + if remove_isolated_pixels > 0: + m = torch.from_numpy(scipy.ndimage.grey_opening(m.cpu().numpy(), size=(remove_isolated_pixels, remove_isolated_pixels))) + + # smooth the mask + if smooth > 0: + if smooth % 2 == 0: + smooth += 1 + m = T.functional.gaussian_blur((m > 0.5).unsqueeze(0), smooth).squeeze(0) + + # blur the mask + if blur > 0: + if blur % 2 == 0: + blur += 1 + m = T.functional.gaussian_blur(m.float().unsqueeze(0), blur).squeeze(0) + + masks.append(m.float()) + + masks = torch.stack(masks, dim=0).float() + + return (masks, ) + +class MaskSmooth: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "amount": ("INT", { "default": 0, "min": 0, "max": 127, "step": 1, }), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/mask" + + def execute(self, mask, amount): + if amount == 0: + return (mask,) + + if amount % 2 == 0: + amount += 1 + + mask = mask > 0.5 + mask = T.functional.gaussian_blur(mask.unsqueeze(1), amount).squeeze(1).float() + + return (mask,) + +class MaskFromBatch: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK", ), + "start": ("INT", { "default": 0, "min": 0, "step": 1, }), + "length": ("INT", { "default": 1, "min": 1, "step": 1, }), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/mask batch" + + def execute(self, mask, start, length): + if length > mask.shape[0]: + length = mask.shape[0] + + start = min(start, mask.shape[0]-1) + length = min(mask.shape[0]-start, length) + return (mask[start:start + length], ) + +class MaskFromList: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "width": ("INT", { "default": 32, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + "height": ("INT", { "default": 32, "min": 0, "max": MAX_RESOLUTION, "step": 8, }), + }, "optional": { + "values": (any, { "default": 0.0, "min": 0.0, "max": 1.0, }), + "str_values": ("STRING", { "default": "", "multiline": True, "placeholder": "0.0, 0.5, 1.0",}), + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/mask" + + def execute(self, width, height, values=None, str_values=""): + out = [] + + if values is not None: + if not isinstance(values, list): + out = [values] + else: + out.extend([float(v) for v in values]) + + if str_values != "": + str_values = [float(v) for v in str_values.split(",")] + out.extend(str_values) + + if out == []: + raise ValueError("No values provided") + + out = torch.tensor(out).float().clamp(0.0, 1.0) + out = out.view(-1, 1, 1).expand(-1, height, width) + + values = None + str_values = "" + + return (out, ) + 
+class MaskFromRGBCMYBW: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE", ), + "threshold_r": ("FLOAT", { "default": 0.15, "min": 0.0, "max": 1, "step": 0.01, }), + "threshold_g": ("FLOAT", { "default": 0.15, "min": 0.0, "max": 1, "step": 0.01, }), + "threshold_b": ("FLOAT", { "default": 0.15, "min": 0.0, "max": 1, "step": 0.01, }), + } + } + + RETURN_TYPES = ("MASK","MASK","MASK","MASK","MASK","MASK","MASK","MASK",) + RETURN_NAMES = ("red","green","blue","cyan","magenta","yellow","black","white",) + FUNCTION = "execute" + CATEGORY = "essentials/mask" + + def execute(self, image, threshold_r, threshold_g, threshold_b): + red = ((image[..., 0] >= 1-threshold_r) & (image[..., 1] < threshold_g) & (image[..., 2] < threshold_b)).float() + green = ((image[..., 0] < threshold_r) & (image[..., 1] >= 1-threshold_g) & (image[..., 2] < threshold_b)).float() + blue = ((image[..., 0] < threshold_r) & (image[..., 1] < threshold_g) & (image[..., 2] >= 1-threshold_b)).float() + + cyan = ((image[..., 0] < threshold_r) & (image[..., 1] >= 1-threshold_g) & (image[..., 2] >= 1-threshold_b)).float() + magenta = ((image[..., 0] >= 1-threshold_r) & (image[..., 1] < threshold_g) & (image[..., 2] > 1-threshold_b)).float() + yellow = ((image[..., 0] >= 1-threshold_r) & (image[..., 1] >= 1-threshold_g) & (image[..., 2] < threshold_b)).float() + + black = ((image[..., 0] <= threshold_r) & (image[..., 1] <= threshold_g) & (image[..., 2] <= threshold_b)).float() + white = ((image[..., 0] >= 1-threshold_r) & (image[..., 1] >= 1-threshold_g) & (image[..., 2] >= 1-threshold_b)).float() + + return (red, green, blue, cyan, magenta, yellow, black, white,) + +class TransitionMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "width": ("INT", { "default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1, }), + "height": ("INT", { "default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1, }), + "frames": ("INT", { "default": 16, "min": 1, "max": 9999, "step": 1, }), + "start_frame": ("INT", { "default": 0, "min": 0, "step": 1, }), + "end_frame": ("INT", { "default": 9999, "min": 0, "step": 1, }), + "transition_type": (["horizontal slide", "vertical slide", "horizontal bar", "vertical bar", "center box", "horizontal door", "vertical door", "circle", "fade"],), + "timing_function": (["linear", "in", "out", "in-out"],) + } + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/mask" + + def linear(self, i, t): + return i/t + def ease_in(self, i, t): + return pow(i/t, 2) + def ease_out(self, i, t): + return 1 - pow(1 - i/t, 2) + def ease_in_out(self, i, t): + if i < t/2: + return pow(i/(t/2), 2) / 2 + else: + return 1 - pow(1 - (i - t/2)/(t/2), 2) / 2 + + def execute(self, width, height, frames, start_frame, end_frame, transition_type, timing_function): + if timing_function == 'in': + timing_function = self.ease_in + elif timing_function == 'out': + timing_function = self.ease_out + elif timing_function == 'in-out': + timing_function = self.ease_in_out + else: + timing_function = self.linear + + out = [] + + end_frame = min(frames, end_frame) + transition = end_frame - start_frame + + if start_frame > 0: + out = out + [torch.full((height, width), 0.0, dtype=torch.float32, device="cpu")] * start_frame + + for i in range(transition): + frame = torch.full((height, width), 0.0, dtype=torch.float32, device="cpu") + progress = timing_function(i, transition-1) + + if "horizontal slide" in transition_type: + pos = round(width*progress) + frame[:, :pos] = 1.0 
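+ # the remaining transition types follow the same pattern: progress (0..1) is mapped to a pixel
+ # extent and that region of the frame is set to 1.0 ("fade" instead fills the frame with progress)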
+ elif "vertical slide" in transition_type: + pos = round(height*progress) + frame[:pos, :] = 1.0 + elif "box" in transition_type: + box_w = round(width*progress) + box_h = round(height*progress) + x1 = (width - box_w) // 2 + y1 = (height - box_h) // 2 + x2 = x1 + box_w + y2 = y1 + box_h + frame[y1:y2, x1:x2] = 1.0 + elif "circle" in transition_type: + radius = math.ceil(math.sqrt(pow(width,2)+pow(height,2))*progress/2) + c_x = width // 2 + c_y = height // 2 + # is this real life? Am I hallucinating? + x = torch.arange(0, width, dtype=torch.float32, device="cpu") + y = torch.arange(0, height, dtype=torch.float32, device="cpu") + y, x = torch.meshgrid((y, x), indexing="ij") + circle = ((x - c_x) ** 2 + (y - c_y) ** 2) <= (radius ** 2) + frame[circle] = 1.0 + elif "horizontal bar" in transition_type: + bar = round(height*progress) + y1 = (height - bar) // 2 + y2 = y1 + bar + frame[y1:y2, :] = 1.0 + elif "vertical bar" in transition_type: + bar = round(width*progress) + x1 = (width - bar) // 2 + x2 = x1 + bar + frame[:, x1:x2] = 1.0 + elif "horizontal door" in transition_type: + bar = math.ceil(height*progress/2) + if bar > 0: + frame[:bar, :] = 1.0 + frame[-bar:, :] = 1.0 + elif "vertical door" in transition_type: + bar = math.ceil(width*progress/2) + if bar > 0: + frame[:, :bar] = 1.0 + frame[:, -bar:] = 1.0 + elif "fade" in transition_type: + frame[:,:] = progress + + out.append(frame) + + if end_frame < frames: + out = out + [torch.full((height, width), 1.0, dtype=torch.float32, device="cpu")] * (frames - end_frame) + + out = torch.stack(out, dim=0) + + return (out, ) + +MASK_CLASS_MAPPINGS = { + "MaskBlur+": MaskBlur, + "MaskBoundingBox+": MaskBoundingBox, + "MaskFix+": MaskFix, + "MaskFlip+": MaskFlip, + "MaskFromColor+": MaskFromColor, + "MaskFromList+": MaskFromList, + "MaskFromRGBCMYBW+": MaskFromRGBCMYBW, + "MaskFromSegmentation+": MaskFromSegmentation, + "MaskPreview+": MaskPreview, + "MaskSmooth+": MaskSmooth, + "TransitionMask+": TransitionMask, + + # Batch + "MaskBatch+": MaskBatch, + "MaskExpandBatch+": MaskExpandBatch, + "MaskFromBatch+": MaskFromBatch, +} + +MASK_NAME_MAPPINGS = { + "MaskBlur+": "πŸ”§ Mask Blur", + "MaskFix+": "πŸ”§ Mask Fix", + "MaskFlip+": "πŸ”§ Mask Flip", + "MaskFromColor+": "πŸ”§ Mask From Color", + "MaskFromList+": "πŸ”§ Mask From List", + "MaskFromRGBCMYBW+": "πŸ”§ Mask From RGB/CMY/BW", + "MaskFromSegmentation+": "πŸ”§ Mask From Segmentation", + "MaskPreview+": "πŸ”§ Mask Preview", + "MaskBoundingBox+": "πŸ”§ Mask Bounding Box", + "MaskSmooth+": "πŸ”§ Mask Smooth", + "TransitionMask+": "πŸ”§ Transition Mask", + + "MaskBatch+": "πŸ”§ Mask Batch", + "MaskExpandBatch+": "πŸ”§ Mask Expand Batch", + "MaskFromBatch+": "πŸ”§ Mask From Batch", +} diff --git a/custom_nodes/ComfyUI-essentials-main/misc.py b/custom_nodes/ComfyUI-essentials-main/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..9a0670edc3bcd041e31f41577bf0b259ebe12412 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/misc.py @@ -0,0 +1,574 @@ +import math +import torch +from .utils import AnyType +import comfy.model_management +from nodes import MAX_RESOLUTION +import time + +any = AnyType("*") + +class SimpleMathFloat: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "value": ("FLOAT", { "default": 0.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.05 }), + }, + } + + RETURN_TYPES = ("FLOAT", ) + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + + def execute(self, value): + return (float(value), ) + +class 
SimpleMathPercent: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "value": ("FLOAT", { "default": 0.0, "min": 0, "max": 1, "step": 0.05 }), + }, + } + + RETURN_TYPES = ("FLOAT", ) + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + + def execute(self, value): + return (float(value), ) + +class SimpleMathInt: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "value": ("INT", { "default": 0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 1 }), + }, + } + + RETURN_TYPES = ("INT",) + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + + def execute(self, value): + return (int(value), ) + +class SimpleMathSlider: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "value": ("FLOAT", { "display": "slider", "default": 0.5, "min": 0.0, "max": 1.0, "step": 0.001 }), + "min": ("FLOAT", { "default": 0.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.001 }), + "max": ("FLOAT", { "default": 1.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.001 }), + "rounding": ("INT", { "default": 0, "min": 0, "max": 10, "step": 1 }), + }, + } + + RETURN_TYPES = ("FLOAT", "INT",) + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + + def execute(self, value, min, max, rounding): + value = min + value * (max - min) + + if rounding > 0: + value = round(value, rounding) + + return (value, int(value), ) + +class SimpleMathSliderLowRes: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "value": ("INT", { "display": "slider", "default": 5, "min": 0, "max": 10, "step": 1 }), + "min": ("FLOAT", { "default": 0.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.001 }), + "max": ("FLOAT", { "default": 1.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.001 }), + "rounding": ("INT", { "default": 0, "min": 0, "max": 10, "step": 1 }), + }, + } + + RETURN_TYPES = ("FLOAT", "INT",) + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + + def execute(self, value, min, max, rounding): + value = 0.1 * value + value = min + value * (max - min) + if rounding > 0: + value = round(value, rounding) + + return (value, ) + +class SimpleMathBoolean: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "value": ("BOOLEAN", { "default": False }), + }, + } + + RETURN_TYPES = ("BOOLEAN",) + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + + def execute(self, value): + return (value, int(value), ) + +class SimpleMath: + @classmethod + def INPUT_TYPES(s): + return { + "optional": { + "a": (any, { "default": 0.0 }), + "b": (any, { "default": 0.0 }), + "c": (any, { "default": 0.0 }), + }, + "required": { + "value": ("STRING", { "multiline": False, "default": "" }), + }, + } + + RETURN_TYPES = ("INT", "FLOAT", ) + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + + def execute(self, value, a = 0.0, b = 0.0, c = 0.0, d = 0.0): + import ast + import operator as op + + h, w = 0.0, 0.0 + if hasattr(a, 'shape'): + a = list(a.shape) + if hasattr(b, 'shape'): + b = list(b.shape) + if hasattr(c, 'shape'): + c = list(c.shape) + if hasattr(d, 'shape'): + d = list(d.shape) + + if isinstance(a, str): + a = float(a) + if isinstance(b, str): + b = float(b) + if isinstance(c, str): + c = float(c) + if isinstance(d, str): + d = float(d) + + operators = { + ast.Add: op.add, + ast.Sub: op.sub, + ast.Mult: op.mul, + ast.Div: op.truediv, + ast.FloorDiv: op.floordiv, + ast.Pow: op.pow, + #ast.BitXor: op.xor, + #ast.BitOr: op.or_, + #ast.BitAnd: op.and_, + 
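+ # the bitwise operators above stay commented out; eval_ only applies operators and helper
+ # functions listed in these whitelists, so arbitrary expressions are never executed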
ast.USub: op.neg, + ast.Mod: op.mod, + ast.Eq: op.eq, + ast.NotEq: op.ne, + ast.Lt: op.lt, + ast.LtE: op.le, + ast.Gt: op.gt, + ast.GtE: op.ge, + ast.And: lambda x, y: x and y, + ast.Or: lambda x, y: x or y, + ast.Not: op.not_ + } + + op_functions = { + 'min': min, + 'max': max, + 'round': round, + 'sum': sum, + 'len': len, + } + + def eval_(node): + if isinstance(node, ast.Num): # number + return node.n + elif isinstance(node, ast.Name): # variable + if node.id == "a": + return a + if node.id == "b": + return b + if node.id == "c": + return c + if node.id == "d": + return d + elif isinstance(node, ast.BinOp): # + return operators[type(node.op)](eval_(node.left), eval_(node.right)) + elif isinstance(node, ast.UnaryOp): # e.g., -1 + return operators[type(node.op)](eval_(node.operand)) + elif isinstance(node, ast.Compare): # comparison operators + left = eval_(node.left) + for op, comparator in zip(node.ops, node.comparators): + if not operators[type(op)](left, eval_(comparator)): + return 0 + return 1 + elif isinstance(node, ast.BoolOp): # boolean operators (And, Or) + values = [eval_(value) for value in node.values] + return operators[type(node.op)](*values) + elif isinstance(node, ast.Call): # custom function + if node.func.id in op_functions: + args =[eval_(arg) for arg in node.args] + return op_functions[node.func.id](*args) + elif isinstance(node, ast.Subscript): # indexing or slicing + value = eval_(node.value) + if isinstance(node.slice, ast.Constant): + return value[node.slice.value] + else: + return 0 + else: + return 0 + + result = eval_(ast.parse(value, mode='eval').body) + + if math.isnan(result): + result = 0.0 + + return (round(result), result, ) + +class SimpleMathDual: + @classmethod + def INPUT_TYPES(s): + return { + "optional": { + "a": (any, { "default": 0.0 }), + "b": (any, { "default": 0.0 }), + "c": (any, { "default": 0.0 }), + "d": (any, { "default": 0.0 }), + }, + "required": { + "value_1": ("STRING", { "multiline": False, "default": "" }), + "value_2": ("STRING", { "multiline": False, "default": "" }), + }, + } + + RETURN_TYPES = ("INT", "FLOAT", "INT", "FLOAT", ) + RETURN_NAMES = ("int_1", "float_1", "int_2", "float_2" ) + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + + def execute(self, value_1, value_2, a = 0.0, b = 0.0, c = 0.0, d = 0.0): + return SimpleMath().execute(value_1, a, b, c, d) + SimpleMath().execute(value_2, a, b, c, d) + +class SimpleMathCondition: + @classmethod + def INPUT_TYPES(s): + return { + "optional": { + "a": (any, { "default": 0.0 }), + "b": (any, { "default": 0.0 }), + "c": (any, { "default": 0.0 }), + }, + "required": { + "evaluate": (any, {"default": 0}), + "on_true": ("STRING", { "multiline": False, "default": "" }), + "on_false": ("STRING", { "multiline": False, "default": "" }), + }, + } + + RETURN_TYPES = ("INT", "FLOAT", ) + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + + def execute(self, evaluate, on_true, on_false, a = 0.0, b = 0.0, c = 0.0): + return SimpleMath().execute(on_true if evaluate else on_false, a, b, c) + +class SimpleCondition: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "evaluate": (any, {"default": 0}), + "on_true": (any, {"default": 0}), + }, + "optional": { + "on_false": (any, {"default": None}), + }, + } + + RETURN_TYPES = (any,) + RETURN_NAMES = ("result",) + FUNCTION = "execute" + + CATEGORY = "essentials/utilities" + + def execute(self, evaluate, on_true, on_false=None): + from comfy_execution.graph import ExecutionBlocker + if 
not evaluate: + return (on_false if on_false is not None else ExecutionBlocker(None),) + + return (on_true,) + +class SimpleComparison: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "a": (any, {"default": 0}), + "b": (any, {"default": 0}), + "comparison": (["==", "!=", "<", "<=", ">", ">="],), + }, + } + + RETURN_TYPES = ("BOOLEAN",) + FUNCTION = "execute" + + CATEGORY = "essentials/utilities" + + def execute(self, a, b, comparison): + if comparison == "==": + return (a == b,) + elif comparison == "!=": + return (a != b,) + elif comparison == "<": + return (a < b,) + elif comparison == "<=": + return (a <= b,) + elif comparison == ">": + return (a > b,) + elif comparison == ">=": + return (a >= b,) + +class ConsoleDebug: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "value": (any, {}), + }, + "optional": { + "prefix": ("STRING", { "multiline": False, "default": "Value:" }) + } + } + + RETURN_TYPES = () + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + OUTPUT_NODE = True + + def execute(self, value, prefix): + print(f"\033[96m{prefix} {value}\033[0m") + + return (None,) + +class DebugTensorShape: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "tensor": (any, {}), + }, + } + + RETURN_TYPES = () + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + OUTPUT_NODE = True + + def execute(self, tensor): + shapes = [] + def tensorShape(tensor): + if isinstance(tensor, dict): + for k in tensor: + tensorShape(tensor[k]) + elif isinstance(tensor, list): + for i in range(len(tensor)): + tensorShape(tensor[i]) + elif hasattr(tensor, 'shape'): + shapes.append(list(tensor.shape)) + + tensorShape(tensor) + + print(f"\033[96mShapes found: {shapes}\033[0m") + + return (None,) + +class BatchCount: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "batch": (any, {}), + }, + } + + RETURN_TYPES = ("INT",) + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + + def execute(self, batch): + count = 0 + if hasattr(batch, 'shape'): + count = batch.shape[0] + elif isinstance(batch, dict) and 'samples' in batch: + count = batch['samples'].shape[0] + elif isinstance(batch, list) or isinstance(batch, dict): + count = len(batch) + + return (count, ) + +class ModelCompile(): + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "fullgraph": ("BOOLEAN", { "default": False }), + "dynamic": ("BOOLEAN", { "default": False }), + "mode": (["default", "reduce-overhead", "max-autotune", "max-autotune-no-cudagraphs"],), + }, + } + + RETURN_TYPES = ("MODEL", ) + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + + def execute(self, model, fullgraph, dynamic, mode): + work_model = model.clone() + torch._dynamo.config.suppress_errors = True + work_model.add_object_patch("diffusion_model", torch.compile(model=work_model.get_model_object("diffusion_model"), dynamic=dynamic, fullgraph=fullgraph, mode=mode)) + return (work_model, ) + +class RemoveLatentMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { "samples": ("LATENT",),}} + RETURN_TYPES = ("LATENT",) + FUNCTION = "execute" + + CATEGORY = "essentials/utilities" + + def execute(self, samples): + s = samples.copy() + if "noise_mask" in s: + del s["noise_mask"] + + return (s,) + +class SDXLEmptyLatentSizePicker: + def __init__(self): + self.device = comfy.model_management.intermediate_device() + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "resolution": (["704x1408 (0.5)","704x1344 
(0.52)","768x1344 (0.57)","768x1280 (0.6)","832x1216 (0.68)","832x1152 (0.72)","896x1152 (0.78)","896x1088 (0.82)","960x1088 (0.88)","960x1024 (0.94)","1024x1024 (1.0)","1024x960 (1.07)","1088x960 (1.13)","1088x896 (1.21)","1152x896 (1.29)","1152x832 (1.38)","1216x832 (1.46)","1280x768 (1.67)","1344x768 (1.75)","1344x704 (1.91)","1408x704 (2.0)","1472x704 (2.09)","1536x640 (2.4)","1600x640 (2.5)","1664x576 (2.89)","1728x576 (3.0)",], {"default": "1024x1024 (1.0)"}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + "width_override": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "height_override": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + }} + + RETURN_TYPES = ("LATENT","INT","INT",) + RETURN_NAMES = ("LATENT","width","height",) + FUNCTION = "execute" + CATEGORY = "essentials/utilities" + + def execute(self, resolution, batch_size, width_override=0, height_override=0): + width, height = resolution.split(" ")[0].split("x") + width = width_override if width_override > 0 else int(width) + height = height_override if height_override > 0 else int(height) + + latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device) + + return ({"samples":latent}, width, height,) + +class DisplayAny: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input": (("*",{})), + "mode": (["raw value", "tensor shape"],), + }, + } + + @classmethod + def VALIDATE_INPUTS(s, input_types): + return True + + RETURN_TYPES = ("STRING",) + FUNCTION = "execute" + OUTPUT_NODE = True + + CATEGORY = "essentials/utilities" + + def execute(self, input, mode): + if mode == "tensor shape": + text = [] + def tensorShape(tensor): + if isinstance(tensor, dict): + for k in tensor: + tensorShape(tensor[k]) + elif isinstance(tensor, list): + for i in range(len(tensor)): + tensorShape(tensor[i]) + elif hasattr(tensor, 'shape'): + text.append(list(tensor.shape)) + + tensorShape(input) + input = text + + text = str(input) + + return {"ui": {"text": text}, "result": (text,)} + +MISC_CLASS_MAPPINGS = { + "BatchCount+": BatchCount, + "ConsoleDebug+": ConsoleDebug, + "DebugTensorShape+": DebugTensorShape, + "DisplayAny": DisplayAny, + "ModelCompile+": ModelCompile, + "RemoveLatentMask+": RemoveLatentMask, + "SDXLEmptyLatentSizePicker+": SDXLEmptyLatentSizePicker, + "SimpleComparison+": SimpleComparison, + "SimpleCondition+": SimpleCondition, + "SimpleMath+": SimpleMath, + "SimpleMathDual+": SimpleMathDual, + "SimpleMathCondition+": SimpleMathCondition, + "SimpleMathBoolean+": SimpleMathBoolean, + "SimpleMathFloat+": SimpleMathFloat, + "SimpleMathInt+": SimpleMathInt, + "SimpleMathPercent+": SimpleMathPercent, + "SimpleMathSlider+": SimpleMathSlider, + "SimpleMathSliderLowRes+": SimpleMathSliderLowRes, +} + +MISC_NAME_MAPPINGS = { + "BatchCount+": "πŸ”§ Batch Count", + "ConsoleDebug+": "πŸ”§ Console Debug", + "DebugTensorShape+": "πŸ”§ Debug Tensor Shape", + "DisplayAny": "πŸ”§ Display Any", + "ModelCompile+": "πŸ”§ Model Compile", + "RemoveLatentMask+": "πŸ”§ Remove Latent Mask", + "SDXLEmptyLatentSizePicker+": "πŸ”§ Empty Latent Size Picker", + "SimpleComparison+": "πŸ”§ Simple Comparison", + "SimpleCondition+": "πŸ”§ Simple Condition", + "SimpleMath+": "πŸ”§ Simple Math", + "SimpleMathDual+": "πŸ”§ Simple Math Dual", + "SimpleMathCondition+": "πŸ”§ Simple Math Condition", + "SimpleMathBoolean+": "πŸ”§ Simple Math Boolean", + "SimpleMathFloat+": "πŸ”§ Simple Math Float", + "SimpleMathInt+": "πŸ”§ Simple Math 
Int", + "SimpleMathPercent+": "πŸ”§ Simple Math Percent", + "SimpleMathSlider+": "πŸ”§ Simple Math Slider", + "SimpleMathSliderLowRes+": "πŸ”§ Simple Math Slider low-res", +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-essentials-main/pyproject.toml b/custom_nodes/ComfyUI-essentials-main/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..affa1310b05b6f70f302a84771918984499defae --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/pyproject.toml @@ -0,0 +1,15 @@ +[project] +name = "comfyui_essentials" +description = "Essential nodes that are weirdly missing from ComfyUI core. With few exceptions they are new features and not commodities." +version = "1.1.0" +license = { file = "LICENSE" } +dependencies = ["numba", "colour-science", "rembg", "pixeloe"] + +[project.urls] +Repository = "https://github.com/cubiq/ComfyUI_essentials" +# Used by Comfy Registry https://comfyregistry.org + +[tool.comfy] +PublisherId = "matteo" +DisplayName = "ComfyUI_essentials" +Icon = "" diff --git a/custom_nodes/ComfyUI-essentials-main/requirements.txt b/custom_nodes/ComfyUI-essentials-main/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ca6bb9baa5e2f119c4c286cbb193cf7c4b72ceb2 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/requirements.txt @@ -0,0 +1,5 @@ +numba +colour-science +rembg +pixeloe +transparent-background \ No newline at end of file diff --git a/custom_nodes/ComfyUI-essentials-main/sampling.py b/custom_nodes/ComfyUI-essentials-main/sampling.py new file mode 100644 index 0000000000000000000000000000000000000000..f4244ea9ee45dee04fd81ba37038c508da05e346 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/sampling.py @@ -0,0 +1,811 @@ +import os +import comfy.samplers +import comfy.sample +import torch +from nodes import common_ksampler, CLIPTextEncode +from comfy.utils import ProgressBar +from .utils import expand_mask, FONTS_DIR, parse_string_to_list +import torchvision.transforms.v2 as T +import torch.nn.functional as F +import logging +import folder_paths + +# From https://github.com/BlenderNeko/ComfyUI_Noise/ +def slerp(val, low, high): + dims = low.shape + + low = low.reshape(dims[0], -1) + high = high.reshape(dims[0], -1) + + low_norm = low/torch.norm(low, dim=1, keepdim=True) + high_norm = high/torch.norm(high, dim=1, keepdim=True) + + low_norm[low_norm != low_norm] = 0.0 + high_norm[high_norm != high_norm] = 0.0 + + omega = torch.acos((low_norm*high_norm).sum(1)) + so = torch.sin(omega) + res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high + + return res.reshape(dims) + +class KSamplerVariationsWithNoise: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL", ), + "latent_image": ("LATENT", ), + "main_seed": ("INT:seed", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), + "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "variation_strength": ("FLOAT", {"default": 0.17, "min": 0.0, "max": 1.0, "step":0.01, "round": 0.01}), + #"start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}), + #"end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}), + #"return_with_leftover_noise": (["disable", "enable"], ), + 
"variation_seed": ("INT:seed", {"default": 12345, "min": 0, "max": 0xffffffffffffffff}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step":0.01, "round": 0.01}), + }} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "execute" + CATEGORY = "essentials/sampling" + + def prepare_mask(self, mask, shape): + mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(shape[2], shape[3]), mode="bilinear") + mask = mask.expand((-1,shape[1],-1,-1)) + if mask.shape[0] < shape[0]: + mask = mask.repeat((shape[0] -1) // mask.shape[0] + 1, 1, 1, 1)[:shape[0]] + return mask + + def execute(self, model, latent_image, main_seed, steps, cfg, sampler_name, scheduler, positive, negative, variation_strength, variation_seed, denoise): + if main_seed == variation_seed: + variation_seed += 1 + + end_at_step = steps #min(steps, end_at_step) + start_at_step = round(end_at_step - end_at_step * denoise) + + force_full_denoise = True + disable_noise = True + + device = comfy.model_management.get_torch_device() + + # Generate base noise + batch_size, _, height, width = latent_image["samples"].shape + generator = torch.manual_seed(main_seed) + base_noise = torch.randn((1, 4, height, width), dtype=torch.float32, device="cpu", generator=generator).repeat(batch_size, 1, 1, 1).cpu() + + # Generate variation noise + generator = torch.manual_seed(variation_seed) + variation_noise = torch.randn((batch_size, 4, height, width), dtype=torch.float32, device="cpu", generator=generator).cpu() + + slerp_noise = slerp(variation_strength, base_noise, variation_noise) + + # Calculate sigma + comfy.model_management.load_model_gpu(model) + sampler = comfy.samplers.KSampler(model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=1.0, model_options=model.model_options) + sigmas = sampler.sigmas + sigma = sigmas[start_at_step] - sigmas[end_at_step] + sigma /= model.model.latent_format.scale_factor + sigma = sigma.detach().cpu().item() + + work_latent = latent_image.copy() + work_latent["samples"] = latent_image["samples"].clone() + slerp_noise * sigma + + # if there's a mask we need to expand it to avoid artifacts, 5 pixels should be enough + if "noise_mask" in latent_image: + noise_mask = self.prepare_mask(latent_image["noise_mask"], latent_image['samples'].shape) + work_latent["samples"] = noise_mask * work_latent["samples"] + (1-noise_mask) * latent_image["samples"] + work_latent['noise_mask'] = expand_mask(latent_image["noise_mask"].clone(), 5, True) + + return common_ksampler(model, main_seed, steps, cfg, sampler_name, scheduler, positive, negative, work_latent, denoise=1.0, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise) + + +class KSamplerVariationsStochastic: + @classmethod + def INPUT_TYPES(s): + return {"required":{ + "model": ("MODEL",), + "latent_image": ("LATENT", ), + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "steps": ("INT", {"default": 25, "min": 1, "max": 10000}), + "cfg": ("FLOAT", {"default": 7.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), + "sampler": (comfy.samplers.KSampler.SAMPLERS, ), + "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "variation_seed": ("INT:seed", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "variation_strength": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step":0.05, "round": 0.01}), + #"variation_sampler": 
(comfy.samplers.KSampler.SAMPLERS, ), + "cfg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step":0.05, "round": 0.01}), + }} + + RETURN_TYPES = ("LATENT", ) + FUNCTION = "execute" + CATEGORY = "essentials/sampling" + + def execute(self, model, latent_image, noise_seed, steps, cfg, sampler, scheduler, positive, negative, variation_seed, variation_strength, cfg_scale, variation_sampler="dpmpp_2m_sde"): + # Stage 1: composition sampler + force_full_denoise = False # return with leftover noise = "enable" + disable_noise = False # add noise = "enable" + + end_at_step = max(int(steps * (1-variation_strength)), 1) + start_at_step = 0 + + work_latent = latent_image.copy() + batch_size = work_latent["samples"].shape[0] + work_latent["samples"] = work_latent["samples"][0].unsqueeze(0) + + stage1 = common_ksampler(model, noise_seed, steps, cfg, sampler, scheduler, positive, negative, work_latent, denoise=1.0, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)[0] + + if batch_size > 1: + stage1["samples"] = stage1["samples"].clone().repeat(batch_size, 1, 1, 1) + + # Stage 2: variation sampler + force_full_denoise = True + disable_noise = True + cfg = max(cfg * cfg_scale, 1.0) + start_at_step = end_at_step + end_at_step = steps + + return common_ksampler(model, variation_seed, steps, cfg, variation_sampler, scheduler, positive, negative, stage1, denoise=1.0, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise) + +class InjectLatentNoise: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "latent": ("LATENT", ), + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "noise_strength": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step":0.01, "round": 0.01}), + "normalize": (["false", "true"], {"default": "false"}), + }, + "optional": { + "mask": ("MASK", ), + }} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "execute" + CATEGORY = "essentials/sampling" + + def execute(self, latent, noise_seed, noise_strength, normalize="false", mask=None): + torch.manual_seed(noise_seed) + noise_latent = latent.copy() + original_samples = noise_latent["samples"].clone() + random_noise = torch.randn_like(original_samples) + + if normalize == "true": + mean = original_samples.mean() + std = original_samples.std() + random_noise = random_noise * std + mean + + random_noise = original_samples + random_noise * noise_strength + + if mask is not None: + mask = F.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(random_noise.shape[2], random_noise.shape[3]), mode="bilinear") + mask = mask.expand((-1,random_noise.shape[1],-1,-1)).clamp(0.0, 1.0) + if mask.shape[0] < random_noise.shape[0]: + mask = mask.repeat((random_noise.shape[0] -1) // mask.shape[0] + 1, 1, 1, 1)[:random_noise.shape[0]] + elif mask.shape[0] > random_noise.shape[0]: + mask = mask[:random_noise.shape[0]] + random_noise = mask * random_noise + (1-mask) * original_samples + + noise_latent["samples"] = random_noise + + return (noise_latent, ) + +class TextEncodeForSamplerParams: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "text": ("STRING", {"multiline": True, "dynamicPrompts": True, "default": "Separate prompts with at least three dashes\n---\nLike so"}), + "clip": ("CLIP", ) + }} + + RETURN_TYPES = ("CONDITIONING", ) + FUNCTION = "execute" + CATEGORY = "essentials/sampling" + + def execute(self, text, clip): + import re + output_text = 
[] + output_encoded = [] + text = re.sub(r'[-*=~]{4,}\n', '---\n', text) + text = text.split("---\n") + + for t in text: + t = t.strip() + if t: + output_text.append(t) + output_encoded.append(CLIPTextEncode().encode(clip, t)[0]) + + #if len(output_encoded) == 1: + # output = output_encoded[0] + #else: + output = {"text": output_text, "encoded": output_encoded} + + return (output, ) + +class SamplerSelectHelper: + @classmethod + def INPUT_TYPES(s): + return {"required": { + **{s: ("BOOLEAN", { "default": False }) for s in comfy.samplers.KSampler.SAMPLERS}, + }} + + RETURN_TYPES = ("STRING", ) + FUNCTION = "execute" + CATEGORY = "essentials/sampling" + + def execute(self, **values): + values = [v for v in values if values[v]] + values = ", ".join(values) + + return (values, ) + +class SchedulerSelectHelper: + @classmethod + def INPUT_TYPES(s): + return {"required": { + **{s: ("BOOLEAN", { "default": False }) for s in comfy.samplers.KSampler.SCHEDULERS}, + }} + + RETURN_TYPES = ("STRING", ) + FUNCTION = "execute" + CATEGORY = "essentials/sampling" + + def execute(self, **values): + values = [v for v in values if values[v]] + values = ", ".join(values) + + return (values, ) + +class LorasForFluxParams: + @classmethod + def INPUT_TYPES(s): + optional_loras = ['none'] + folder_paths.get_filename_list("loras") + return { + "required": { + "lora_1": (folder_paths.get_filename_list("loras"), {"tooltip": "The name of the LoRA."}), + "strength_model_1": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "1.0" }), + }, + #"optional": { + # "lora_2": (optional_loras, ), + # "strength_lora_2": ("STRING", { "multiline": False, "dynamicPrompts": False }), + # "lora_3": (optional_loras, ), + # "strength_lora_3": ("STRING", { "multiline": False, "dynamicPrompts": False }), + # "lora_4": (optional_loras, ), + # "strength_lora_4": ("STRING", { "multiline": False, "dynamicPrompts": False }), + #} + } + + RETURN_TYPES = ("LORA_PARAMS", ) + FUNCTION = "execute" + CATEGORY = "essentials/sampling" + + def execute(self, lora_1, strength_model_1, lora_2="none", strength_lora_2="", lora_3="none", strength_lora_3="", lora_4="none", strength_lora_4=""): + output = { "loras": [], "strengths": [] } + output["loras"].append(lora_1) + output["strengths"].append(parse_string_to_list(strength_model_1)) + + if lora_2 != "none": + output["loras"].append(lora_2) + if strength_lora_2 == "": + strength_lora_2 = "1.0" + output["strengths"].append(parse_string_to_list(strength_lora_2)) + if lora_3 != "none": + output["loras"].append(lora_3) + if strength_lora_3 == "": + strength_lora_3 = "1.0" + output["strengths"].append(parse_string_to_list(strength_lora_3)) + if lora_4 != "none": + output["loras"].append(lora_4) + if strength_lora_4 == "": + strength_lora_4 = "1.0" + output["strengths"].append(parse_string_to_list(strength_lora_4)) + + return (output,) + + +class FluxSamplerParams: + def __init__(self): + self.loraloader = None + self.lora = (None, None) + + @classmethod + def INPUT_TYPES(s): + return {"required": { + "model": ("MODEL", ), + "conditioning": ("CONDITIONING", ), + "latent_image": ("LATENT", ), + + "seed": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "?" 
}), + "sampler": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "euler" }), + "scheduler": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "simple" }), + "steps": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "20" }), + "guidance": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "3.5" }), + "max_shift": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "" }), + "base_shift": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "" }), + "denoise": ("STRING", { "multiline": False, "dynamicPrompts": False, "default": "1.0" }), + }, + "optional": { + "loras": ("LORA_PARAMS",), + }} + + RETURN_TYPES = ("LATENT","SAMPLER_PARAMS") + RETURN_NAMES = ("latent", "params") + FUNCTION = "execute" + CATEGORY = "essentials/sampling" + + def execute(self, model, conditioning, latent_image, seed, sampler, scheduler, steps, guidance, max_shift, base_shift, denoise, loras=None): + import random + import time + from comfy_extras.nodes_custom_sampler import Noise_RandomNoise, BasicScheduler, BasicGuider, SamplerCustomAdvanced + from comfy_extras.nodes_latent import LatentBatch + from comfy_extras.nodes_model_advanced import ModelSamplingFlux, ModelSamplingAuraFlow + from node_helpers import conditioning_set_values + from nodes import LoraLoader + + is_schnell = model.model.model_type == comfy.model_base.ModelType.FLOW + + noise = seed.replace("\n", ",").split(",") + noise = [random.randint(0, 999999) if "?" in n else int(n) for n in noise] + if not noise: + noise = [random.randint(0, 999999)] + + if sampler == '*': + sampler = comfy.samplers.KSampler.SAMPLERS + elif sampler.startswith("!"): + sampler = sampler.replace("\n", ",").split(",") + sampler = [s.strip("! ") for s in sampler] + sampler = [s for s in comfy.samplers.KSampler.SAMPLERS if s not in sampler] + else: + sampler = sampler.replace("\n", ",").split(",") + sampler = [s.strip() for s in sampler if s.strip() in comfy.samplers.KSampler.SAMPLERS] + if not sampler: + sampler = ['ipndm'] + + if scheduler == '*': + scheduler = comfy.samplers.KSampler.SCHEDULERS + elif scheduler.startswith("!"): + scheduler = scheduler.replace("\n", ",").split(",") + scheduler = [s.strip("! 
") for s in scheduler] + scheduler = [s for s in comfy.samplers.KSampler.SCHEDULERS if s not in scheduler] + else: + scheduler = scheduler.replace("\n", ",").split(",") + scheduler = [s.strip() for s in scheduler] + scheduler = [s for s in scheduler if s in comfy.samplers.KSampler.SCHEDULERS] + if not scheduler: + scheduler = ['simple'] + + if steps == "": + if is_schnell: + steps = "4" + else: + steps = "20" + steps = parse_string_to_list(steps) + + denoise = "1.0" if denoise == "" else denoise + denoise = parse_string_to_list(denoise) + + guidance = "3.5" if guidance == "" else guidance + guidance = parse_string_to_list(guidance) + + if not is_schnell: + max_shift = "1.15" if max_shift == "" else max_shift + base_shift = "0.5" if base_shift == "" else base_shift + else: + max_shift = "0" + base_shift = "1.0" if base_shift == "" else base_shift + + max_shift = parse_string_to_list(max_shift) + base_shift = parse_string_to_list(base_shift) + + cond_text = None + if isinstance(conditioning, dict) and "encoded" in conditioning: + cond_text = conditioning["text"] + cond_encoded = conditioning["encoded"] + else: + cond_encoded = [conditioning] + + out_latent = None + out_params = [] + + basicschedueler = BasicScheduler() + basicguider = BasicGuider() + samplercustomadvanced = SamplerCustomAdvanced() + latentbatch = LatentBatch() + modelsamplingflux = ModelSamplingFlux() if not is_schnell else ModelSamplingAuraFlow() + width = latent_image["samples"].shape[3]*8 + height = latent_image["samples"].shape[2]*8 + + lora_strength_len = 1 + if loras: + lora_model = loras["loras"] + lora_strength = loras["strengths"] + lora_strength_len = sum(len(i) for i in lora_strength) + + if self.loraloader is None: + self.loraloader = LoraLoader() + + # count total number of samples + total_samples = len(cond_encoded) * len(noise) * len(max_shift) * len(base_shift) * len(guidance) * len(sampler) * len(scheduler) * len(steps) * len(denoise) * lora_strength_len + current_sample = 0 + if total_samples > 1: + pbar = ProgressBar(total_samples) + + lora_strength_len = 1 + if loras: + lora_strength_len = len(lora_strength[0]) + + for los in range(lora_strength_len): + if loras: + patched_model = self.loraloader.load_lora(model, None, lora_model[0], lora_strength[0][los], 0)[0] + else: + patched_model = model + + for i in range(len(cond_encoded)): + conditioning = cond_encoded[i] + ct = cond_text[i] if cond_text else None + for n in noise: + randnoise = Noise_RandomNoise(n) + for ms in max_shift: + for bs in base_shift: + if is_schnell: + work_model = modelsamplingflux.patch_aura(patched_model, bs)[0] + else: + work_model = modelsamplingflux.patch(patched_model, ms, bs, width, height)[0] + for g in guidance: + cond = conditioning_set_values(conditioning, {"guidance": g}) + guider = basicguider.get_guider(work_model, cond)[0] + for s in sampler: + samplerobj = comfy.samplers.sampler_object(s) + for sc in scheduler: + for st in steps: + for d in denoise: + sigmas = basicschedueler.get_sigmas(work_model, sc, st, d)[0] + current_sample += 1 + log = f"Sampling {current_sample}/{total_samples} with seed {n}, sampler {s}, scheduler {sc}, steps {st}, guidance {g}, max_shift {ms}, base_shift {bs}, denoise {d}" + lora_name = None + lora_str = 0 + if loras: + lora_name = lora_model[0] + lora_str = lora_strength[0][los] + log += f", lora {lora_name}, lora_strength {lora_str}" + logging.info(log) + start_time = time.time() + latent = samplercustomadvanced.sample(randnoise, guider, samplerobj, sigmas, latent_image)[1] + elapsed_time = 
time.time() - start_time + out_params.append({"time": elapsed_time, + "seed": n, + "width": width, + "height": height, + "sampler": s, + "scheduler": sc, + "steps": st, + "guidance": g, + "max_shift": ms, + "base_shift": bs, + "denoise": d, + "prompt": ct, + "lora": lora_name, + "lora_strength": lora_str}) + + if out_latent is None: + out_latent = latent + else: + out_latent = latentbatch.batch(out_latent, latent)[0] + if total_samples > 1: + pbar.update(1) + + return (out_latent, out_params) + +class PlotParameters: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE", ), + "params": ("SAMPLER_PARAMS", ), + "order_by": (["none", "time", "seed", "steps", "denoise", "sampler", "scheduler", "guidance", "max_shift", "base_shift", "lora_strength"], ), + "cols_value": (["none", "time", "seed", "steps", "denoise", "sampler", "scheduler", "guidance", "max_shift", "base_shift", "lora_strength"], ), + "cols_num": ("INT", {"default": -1, "min": -1, "max": 1024 }), + "add_prompt": (["false", "true", "excerpt"], ), + "add_params": (["false", "true", "changes only"], {"default": "true"}), + }} + + RETURN_TYPES = ("IMAGE", ) + FUNCTION = "execute" + CATEGORY = "essentials/sampling" + + def execute(self, images, params, order_by, cols_value, cols_num, add_prompt, add_params): + from PIL import Image, ImageDraw, ImageFont + import math + import textwrap + + if images.shape[0] != len(params): + raise ValueError("Number of images and number of parameters do not match.") + + _params = params.copy() + + if order_by != "none": + sorted_params = sorted(_params, key=lambda x: x[order_by]) + indices = [_params.index(item) for item in sorted_params] + images = images[torch.tensor(indices)] + _params = sorted_params + + if cols_value != "none" and cols_num > -1: + groups = {} + for p in _params: + value = p[cols_value] + if value not in groups: + groups[value] = [] + groups[value].append(p) + cols_num = len(groups) + + sorted_params = [] + groups = list(groups.values()) + for g in zip(*groups): + sorted_params.extend(g) + + indices = [_params.index(item) for item in sorted_params] + images = images[torch.tensor(indices)] + _params = sorted_params + elif cols_num == 0: + cols_num = int(math.sqrt(images.shape[0])) + cols_num = max(1, min(cols_num, 1024)) + + width = images.shape[2] + out_image = [] + + font = ImageFont.truetype(os.path.join(FONTS_DIR, 'ShareTechMono-Regular.ttf'), min(48, int(32*(width/1024)))) + text_padding = 3 + line_height = font.getmask('Q').getbbox()[3] + font.getmetrics()[1] + text_padding*2 + char_width = font.getbbox('M')[2]+1 # using monospace font + + if add_params == "changes only": + value_tracker = {} + for p in _params: + for key, value in p.items(): + if key != "time": + if key not in value_tracker: + value_tracker[key] = set() + value_tracker[key].add(value) + changing_keys = {key for key, values in value_tracker.items() if len(values) > 1 or key == "prompt"} + + result = [] + for p in _params: + changing_params = {key: value for key, value in p.items() if key in changing_keys} + result.append(changing_params) + + _params = result + + for (image, param) in zip(images, _params): + image = image.permute(2, 0, 1) + + if add_params != "false": + if add_params == "changes only": + text = "\n".join([f"{key}: {value}" for key, value in param.items() if key != "prompt"]) + else: + text = f"time: {param['time']:.2f}s, seed: {param['seed']}, steps: {param['steps']}, size: {param['width']}Γ—{param['height']}\ndenoise: {param['denoise']}, sampler: {param['sampler']}, 
sched: {param['scheduler']}\nguidance: {param['guidance']}, max/base shift: {param['max_shift']}/{param['base_shift']}" + if 'lora' in param and param['lora']: + text += f"\nLoRA: {param['lora'][:32]}, str: {param['lora_strength']}" + + lines = text.split("\n") + text_height = line_height * len(lines) + text_image = Image.new('RGB', (width, text_height), color=(0, 0, 0)) + + for i, line in enumerate(lines): + draw = ImageDraw.Draw(text_image) + draw.text((text_padding, i * line_height + text_padding), line, font=font, fill=(255, 255, 255)) + + text_image = T.ToTensor()(text_image).to(image.device) + image = torch.cat([image, text_image], 1) + + if 'prompt' in param and param['prompt'] and add_prompt != "false": + prompt = param['prompt'] + if add_prompt == "excerpt": + prompt = " ".join(param['prompt'].split()[:64]) + prompt += "..." + + cols = math.ceil(width / char_width) + prompt_lines = textwrap.wrap(prompt, width=cols) + prompt_height = line_height * len(prompt_lines) + prompt_image = Image.new('RGB', (width, prompt_height), color=(0, 0, 0)) + + for i, line in enumerate(prompt_lines): + draw = ImageDraw.Draw(prompt_image) + draw.text((text_padding, i * line_height + text_padding), line, font=font, fill=(255, 255, 255)) + + prompt_image = T.ToTensor()(prompt_image).to(image.device) + image = torch.cat([image, prompt_image], 1) + + # a little cleanup + image = torch.nan_to_num(image, nan=0.0).clamp(0.0, 1.0) + out_image.append(image) + + # ensure all images have the same height + if add_prompt != "false" or add_params == "changes only": + max_height = max([image.shape[1] for image in out_image]) + out_image = [F.pad(image, (0, 0, 0, max_height - image.shape[1])) for image in out_image] + + out_image = torch.stack(out_image, 0).permute(0, 2, 3, 1) + + # merge images + if cols_num > -1: + cols = min(cols_num, out_image.shape[0]) + b, h, w, c = out_image.shape + rows = math.ceil(b / cols) + + # Pad the tensor if necessary + if b % cols != 0: + padding = cols - (b % cols) + out_image = F.pad(out_image, (0, 0, 0, 0, 0, 0, 0, padding)) + b = out_image.shape[0] + + # Reshape and transpose + out_image = out_image.reshape(rows, cols, h, w, c) + out_image = out_image.permute(0, 2, 1, 3, 4) + out_image = out_image.reshape(rows * h, cols * w, c).unsqueeze(0) + + """ + width = out_image.shape[2] + # add the title and notes on top + if title and export_labels: + title_font = ImageFont.truetype(os.path.join(FONTS_DIR, 'ShareTechMono-Regular.ttf'), 48) + title_width = title_font.getbbox(title)[2] + title_padding = 6 + title_line_height = title_font.getmask(title).getbbox()[3] + title_font.getmetrics()[1] + title_padding*2 + title_text_height = title_line_height + title_text_image = Image.new('RGB', (width, title_text_height), color=(0, 0, 0, 0)) + + draw = ImageDraw.Draw(title_text_image) + draw.text((width//2 - title_width//2, title_padding), title, font=title_font, fill=(255, 255, 255)) + + title_text_image = T.ToTensor()(title_text_image).unsqueeze(0).permute([0,2,3,1]).to(out_image.device) + out_image = torch.cat([title_text_image, out_image], 1) + """ + + return (out_image, ) + +class GuidanceTimestepping: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "value": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 100.0, "step": 0.05}), + "start_at": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.01}), + "end_at": ("FLOAT", {"default": 0.8, "min": 0.0, "max": 1.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "execute" + CATEGORY 
= "essentials/sampling" + + def execute(self, model, value, start_at, end_at): + sigma_start = model.get_model_object("model_sampling").percent_to_sigma(start_at) + sigma_end = model.get_model_object("model_sampling").percent_to_sigma(end_at) + + def apply_apg(args): + cond = args["cond"] + uncond = args["uncond"] + cond_scale = args["cond_scale"] + sigma = args["sigma"] + + sigma = sigma.detach().cpu()[0].item() + + if sigma <= sigma_start and sigma > sigma_end: + cond_scale = value + + return uncond + (cond - uncond) * cond_scale + + m = model.clone() + m.set_model_sampler_cfg_function(apply_apg) + return (m,) + +class ModelSamplingDiscreteFlowCustom(torch.nn.Module): + def __init__(self, model_config=None): + super().__init__() + if model_config is not None: + sampling_settings = model_config.sampling_settings + else: + sampling_settings = {} + + self.set_parameters(shift=sampling_settings.get("shift", 1.0), multiplier=sampling_settings.get("multiplier", 1000)) + + def set_parameters(self, shift=1.0, timesteps=1000, multiplier=1000, cut_off=1.0, shift_multiplier=0): + self.shift = shift + self.multiplier = multiplier + self.cut_off = cut_off + self.shift_multiplier = shift_multiplier + ts = self.sigma((torch.arange(1, timesteps + 1, 1) / timesteps) * multiplier) + self.register_buffer('sigmas', ts) + + @property + def sigma_min(self): + return self.sigmas[0] + + @property + def sigma_max(self): + return self.sigmas[-1] + + def timestep(self, sigma): + return sigma * self.multiplier + + def sigma(self, timestep): + shift = self.shift + if timestep.dim() == 0: + t = timestep.cpu().item() / self.multiplier + if t <= self.cut_off: + shift = shift * self.shift_multiplier + + return comfy.model_sampling.time_snr_shift(shift, timestep / self.multiplier) + + def percent_to_sigma(self, percent): + if percent <= 0.0: + return 1.0 + if percent >= 1.0: + return 0.0 + return 1.0 - percent + +class ModelSamplingSD3Advanced: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "shift": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0, "step":0.01}), + "cut_off": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step":0.05}), + "shift_multiplier": ("FLOAT", {"default": 2, "min": 0, "max": 10, "step":0.05}), + }} + + RETURN_TYPES = ("MODEL",) + FUNCTION = "execute" + + CATEGORY = "essentials/sampling" + + def execute(self, model, shift, multiplier=1000, cut_off=1.0, shift_multiplier=0): + m = model.clone() + + + sampling_base = ModelSamplingDiscreteFlowCustom + sampling_type = comfy.model_sampling.CONST + + class ModelSamplingAdvanced(sampling_base, sampling_type): + pass + + model_sampling = ModelSamplingAdvanced(model.model.model_config) + model_sampling.set_parameters(shift=shift, multiplier=multiplier, cut_off=cut_off, shift_multiplier=shift_multiplier) + m.add_object_patch("model_sampling", model_sampling) + + return (m, ) + +SAMPLING_CLASS_MAPPINGS = { + "KSamplerVariationsStochastic+": KSamplerVariationsStochastic, + "KSamplerVariationsWithNoise+": KSamplerVariationsWithNoise, + "InjectLatentNoise+": InjectLatentNoise, + "FluxSamplerParams+": FluxSamplerParams, + "GuidanceTimestepping+": GuidanceTimestepping, + "PlotParameters+": PlotParameters, + "TextEncodeForSamplerParams+": TextEncodeForSamplerParams, + "SamplerSelectHelper+": SamplerSelectHelper, + "SchedulerSelectHelper+": SchedulerSelectHelper, + "LorasForFluxParams+": LorasForFluxParams, + "ModelSamplingSD3Advanced+": ModelSamplingSD3Advanced, +} + +SAMPLING_NAME_MAPPINGS = { + 
"KSamplerVariationsStochastic+": "πŸ”§ KSampler Stochastic Variations", + "KSamplerVariationsWithNoise+": "πŸ”§ KSampler Variations with Noise Injection", + "InjectLatentNoise+": "πŸ”§ Inject Latent Noise", + "FluxSamplerParams+": "πŸ”§ Flux Sampler Parameters", + "GuidanceTimestepping+": "πŸ”§ Guidance Timestep (experimental)", + "PlotParameters+": "πŸ”§ Plot Sampler Parameters", + "TextEncodeForSamplerParams+": "πŸ”§Text Encode for Sampler Params", + "SamplerSelectHelper+": "πŸ”§ Sampler Select Helper", + "SchedulerSelectHelper+": "πŸ”§ Scheduler Select Helper", + "LorasForFluxParams+": "πŸ”§ LoRA for Flux Parameters", + "ModelSamplingSD3Advanced+": "πŸ”§ Model Sampling SD3 Advanced", +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-essentials-main/segmentation.py b/custom_nodes/ComfyUI-essentials-main/segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..55f870144afb2f51da26fd1c3bd97c03ed4223b7 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/segmentation.py @@ -0,0 +1,89 @@ +import torch +import torchvision.transforms.v2 as T +import torch.nn.functional as F +from .utils import expand_mask + +class LoadCLIPSegModels: + @classmethod + def INPUT_TYPES(s): + return { + "required": {}, + } + + RETURN_TYPES = ("CLIP_SEG",) + FUNCTION = "execute" + CATEGORY = "essentials/segmentation" + + def execute(self): + from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation + processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") + model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined") + + return ((processor, model),) + +class ApplyCLIPSeg: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "clip_seg": ("CLIP_SEG",), + "image": ("IMAGE",), + "prompt": ("STRING", { "multiline": False, "default": "" }), + "threshold": ("FLOAT", { "default": 0.4, "min": 0.0, "max": 1.0, "step": 0.05 }), + "smooth": ("INT", { "default": 9, "min": 0, "max": 32, "step": 1 }), + "dilate": ("INT", { "default": 0, "min": -32, "max": 32, "step": 1 }), + "blur": ("INT", { "default": 0, "min": 0, "max": 64, "step": 1 }), + }, + } + + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/segmentation" + + def execute(self, image, clip_seg, prompt, threshold, smooth, dilate, blur): + processor, model = clip_seg + + imagenp = image.mul(255).clamp(0, 255).byte().cpu().numpy() + + outputs = [] + for i in imagenp: + inputs = processor(text=prompt, images=[i], return_tensors="pt") + out = model(**inputs) + out = out.logits.unsqueeze(1) + out = torch.sigmoid(out[0][0]) + out = (out > threshold) + outputs.append(out) + + del imagenp + + outputs = torch.stack(outputs, dim=0) + + if smooth > 0: + if smooth % 2 == 0: + smooth += 1 + outputs = T.functional.gaussian_blur(outputs, smooth) + + outputs = outputs.float() + + if dilate != 0: + outputs = expand_mask(outputs, dilate, True) + + if blur > 0: + if blur % 2 == 0: + blur += 1 + outputs = T.functional.gaussian_blur(outputs, blur) + + # resize to original size + outputs = F.interpolate(outputs.unsqueeze(1), size=(image.shape[1], image.shape[2]), mode='bicubic').squeeze(1) + + return (outputs,) + +SEG_CLASS_MAPPINGS = { + "ApplyCLIPSeg+": ApplyCLIPSeg, + "LoadCLIPSegModels+": LoadCLIPSegModels, +} + +SEG_NAME_MAPPINGS = { + "ApplyCLIPSeg+": "πŸ”§ Apply CLIPSeg", + "LoadCLIPSegModels+": "πŸ”§ Load CLIPSeg Models", +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-essentials-main/text.py 
b/custom_nodes/ComfyUI-essentials-main/text.py new file mode 100644 index 0000000000000000000000000000000000000000..ac52c4edd848bfe5358290597e212be105fb8a51 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/text.py @@ -0,0 +1,113 @@ +import os +import torch +from nodes import MAX_RESOLUTION +import torchvision.transforms.v2 as T +from .utils import FONTS_DIR + +class DrawText: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "text": ("STRING", { "multiline": True, "dynamicPrompts": True, "default": "Hello, World!" }), + "font": (sorted([f for f in os.listdir(FONTS_DIR) if f.endswith('.ttf') or f.endswith('.otf')]), ), + "size": ("INT", { "default": 56, "min": 1, "max": 9999, "step": 1 }), + "color": ("STRING", { "multiline": False, "default": "#FFFFFF" }), + "background_color": ("STRING", { "multiline": False, "default": "#00000000" }), + "shadow_distance": ("INT", { "default": 0, "min": 0, "max": 100, "step": 1 }), + "shadow_blur": ("INT", { "default": 0, "min": 0, "max": 100, "step": 1 }), + "shadow_color": ("STRING", { "multiline": False, "default": "#000000" }), + "horizontal_align": (["left", "center", "right"],), + "vertical_align": (["top", "center", "bottom"],), + "offset_x": ("INT", { "default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1 }), + "offset_y": ("INT", { "default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1 }), + "direction": (["ltr", "rtl"],), + }, + "optional": { + "img_composite": ("IMAGE",), + }, + } + + RETURN_TYPES = ("IMAGE", "MASK",) + FUNCTION = "execute" + CATEGORY = "essentials/text" + + def execute(self, text, font, size, color, background_color, shadow_distance, shadow_blur, shadow_color, horizontal_align, vertical_align, offset_x, offset_y, direction, img_composite=None): + from PIL import Image, ImageDraw, ImageFont, ImageColor, ImageFilter + + font = ImageFont.truetype(os.path.join(FONTS_DIR, font), size) + + lines = text.split("\n") + if direction == "rtl": + lines = [line[::-1] for line in lines] + + # Calculate the width and height of the text + text_width = max(font.getbbox(line)[2] for line in lines) + line_height = font.getmask(text).getbbox()[3] + font.getmetrics()[1] # add descent to height + text_height = line_height * len(lines) + + if img_composite is not None: + img_composite = T.ToPILImage()(img_composite.permute([0,3,1,2])[0]).convert('RGBA') + width = img_composite.width + height = img_composite.height + image = Image.new('RGBA', (width, height), color=background_color) + else: + width = text_width + height = text_height + background_color = ImageColor.getrgb(background_color) + image = Image.new('RGBA', (width + shadow_distance, height + shadow_distance), color=background_color) + + image_shadow = None + if shadow_distance > 0: + image_shadow = image.copy() + #image_shadow = Image.new('RGBA', (width + shadow_distance, height + shadow_distance), color=background_color) + + for i, line in enumerate(lines): + line_width = font.getbbox(line)[2] + #text_height =font.getbbox(line)[3] + if horizontal_align == "left": + x = 0 + elif horizontal_align == "center": + x = (width - line_width) / 2 + elif horizontal_align == "right": + x = width - line_width + + if vertical_align == "top": + y = 0 + elif vertical_align == "center": + y = (height - text_height) / 2 + elif vertical_align == "bottom": + y = height - text_height + + x += offset_x + y += i * line_height + offset_y + + draw = ImageDraw.Draw(image) + draw.text((x, y), line, font=font, fill=color) + + if image_shadow is not None: + draw 
= ImageDraw.Draw(image_shadow) + draw.text((x + shadow_distance, y + shadow_distance), line, font=font, fill=shadow_color) + + if image_shadow is not None: + image_shadow = image_shadow.filter(ImageFilter.GaussianBlur(shadow_blur)) + image = Image.alpha_composite(image_shadow, image) + + #image = T.ToTensor()(image).unsqueeze(0).permute([0,2,3,1]) + mask = T.ToTensor()(image).unsqueeze(0).permute([0,2,3,1]) + mask = mask[:, :, :, 3] if mask.shape[3] == 4 else torch.ones_like(mask[:, :, :, 0]) + + if img_composite is not None: + image = Image.alpha_composite(img_composite, image) + + image = T.ToTensor()(image).unsqueeze(0).permute([0,2,3,1]) + + return (image[:, :, :, :3], mask,) + +TEXT_CLASS_MAPPINGS = { + "DrawText+": DrawText, +} + +TEXT_NAME_MAPPINGS = { + "DrawText+": "πŸ”§ Draw Text", +} \ No newline at end of file diff --git a/custom_nodes/ComfyUI-essentials-main/utils.py b/custom_nodes/ComfyUI-essentials-main/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aa1b1146c9d03e67c0b2ca313c8ff0dd66bcfaa7 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/utils.py @@ -0,0 +1,89 @@ +import torch +import numpy as np +import scipy +import os +#import re +from pathlib import Path +import folder_paths + +FONTS_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "fonts") + +SCRIPT_DIR = Path(__file__).parent +folder_paths.add_model_folder_path("luts", (SCRIPT_DIR / "luts").as_posix()) +folder_paths.add_model_folder_path( + "luts", (Path(folder_paths.models_dir) / "luts").as_posix() +) + +# from https://github.com/pythongosssss/ComfyUI-Custom-Scripts +class AnyType(str): + def __ne__(self, __value: object) -> bool: + return False + +def min_(tensor_list): + # return the element-wise min of the tensor list. + x = torch.stack(tensor_list) + mn = x.min(axis=0)[0] + return torch.clamp(mn, min=0) + +def max_(tensor_list): + # return the element-wise max of the tensor list. + x = torch.stack(tensor_list) + mx = x.max(axis=0)[0] + return torch.clamp(mx, max=1) + +def expand_mask(mask, expand, tapered_corners): + c = 0 if tapered_corners else 1 + kernel = np.array([[c, 1, c], + [1, 1, 1], + [c, 1, c]]) + mask = mask.reshape((-1, mask.shape[-2], mask.shape[-1])) + out = [] + for m in mask: + output = m.numpy() + for _ in range(abs(expand)): + if expand < 0: + output = scipy.ndimage.grey_erosion(output, footprint=kernel) + else: + output = scipy.ndimage.grey_dilation(output, footprint=kernel) + output = torch.from_numpy(output) + out.append(output) + + return torch.stack(out, dim=0) + +def parse_string_to_list(s): + elements = s.split(',') + result = [] + + def parse_number(s): + try: + if '.' in s: + return float(s) + else: + return int(s) + except ValueError: + return 0 + + def decimal_places(s): + if '.' in s: + return len(s.split('.')[1]) + return 0 + + for element in elements: + element = element.strip() + if '...' 
in element: + start, rest = element.split('...') + end, step = rest.split('+') + decimals = decimal_places(step) + start = parse_number(start) + end = parse_number(end) + step = parse_number(step) + current = start + if (start > end and step > 0) or (start < end and step < 0): + step = -step + while current <= end: + result.append(round(current, decimals)) + current += step + else: + result.append(round(parse_number(element), decimal_places(element))) + + return result \ No newline at end of file diff --git a/custom_nodes/ComfyUI-essentials-main/workflow_all_nodes.json b/custom_nodes/ComfyUI-essentials-main/workflow_all_nodes.json new file mode 100644 index 0000000000000000000000000000000000000000..fab4c98929e12b9f7ac35bd433ffa55af81480b2 --- /dev/null +++ b/custom_nodes/ComfyUI-essentials-main/workflow_all_nodes.json @@ -0,0 +1,994 @@ +{ + "last_node_id": 42, + "last_link_id": 61, + "nodes": [ + { + "id": 9, + "type": "ConsoleDebug+", + "pos": [ + 720, + 140 + ], + "size": { + "0": 210, + "1": 60 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "value", + "type": "*", + "link": 3 + } + ], + "properties": { + "Node name for S&R": "ConsoleDebug+" + }, + "widgets_values": [ + "Height:" + ] + }, + { + "id": 28, + "type": "PreviewImage", + "pos": [ + 860, + 1180 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 23 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 12, + "type": "PreviewImage", + "pos": [ + 860, + 580 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 11 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 14, + "type": "PreviewImage", + "pos": [ + 860, + 880 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 13 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 18, + "type": "MaskPreview+", + "pos": [ + 2100, + 90 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 19 + } + ], + "properties": { + "Node name for S&R": "MaskPreview+" + } + }, + { + "id": 1, + "type": "GetImageSize+", + "pos": [ + 450, + 80 + ], + "size": { + "0": 210, + "1": 46 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 1 + } + ], + "outputs": [ + { + "name": "width", + "type": "INT", + "links": [ + 2 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "height", + "type": "INT", + "links": [ + 3 + ], + "shape": 3, + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "GetImageSize+" + } + }, + { + "id": 8, + "type": "ConsoleDebug+", + "pos": [ + 720, + 40 + ], + "size": { + "0": 210, + "1": 60 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "value", + "type": "*", + "link": 2 + } + ], + "properties": { + "Node name for S&R": "ConsoleDebug+" + }, + "widgets_values": [ + "Width:" + ] + }, + { + "id": 10, + "type": "PreviewImage", + "pos": [ + 860, + 280 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 9 + } + ], + "properties": { + "Node name for S&R": 
"PreviewImage" + } + }, + { + "id": 36, + "type": "SimpleMath+", + "pos": [ + 1650, + 780 + ], + "size": { + "0": 210, + "1": 80 + }, + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "a", + "type": "INT,FLOAT", + "link": 44 + }, + { + "name": "b", + "type": "INT,FLOAT", + "link": 45 + } + ], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 46 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "FLOAT", + "type": "FLOAT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "SimpleMath+" + }, + "widgets_values": [ + "a*b" + ] + }, + { + "id": 23, + "type": "ConsoleDebug+", + "pos": [ + 1920, + 780 + ], + "size": { + "0": 210, + "1": 60 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "value", + "type": "*", + "link": 46 + } + ], + "properties": { + "Node name for S&R": "ConsoleDebug+" + }, + "widgets_values": [ + "Value:" + ] + }, + { + "id": 2, + "type": "ImageResize+", + "pos": [ + 430, + 340 + ], + "size": { + "0": 310, + "1": 170 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 4 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 9 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "width", + "type": "INT", + "links": [ + 44 + ], + "shape": 3, + "slot_index": 1 + }, + { + "name": "height", + "type": "INT", + "links": [ + 45 + ], + "shape": 3, + "slot_index": 2 + } + ], + "properties": { + "Node name for S&R": "ImageResize+" + }, + "widgets_values": [ + 256, + 64, + "lanczos", + true + ] + }, + { + "id": 4, + "type": "ImageFlip+", + "pos": [ + 430, + 800 + ], + "size": { + "0": 310, + "1": 60 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 6 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 11 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageFlip+" + }, + "widgets_values": [ + "xy" + ] + }, + { + "id": 6, + "type": "ImagePosterize+", + "pos": [ + 430, + 1000 + ], + "size": { + "0": 310, + "1": 60 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 8 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 13 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImagePosterize+" + }, + "widgets_values": [ + 0.5 + ] + }, + { + "id": 27, + "type": "ImageCASharpening+", + "pos": [ + 430, + 1110 + ], + "size": { + "0": 310.79998779296875, + "1": 60 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 22 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 23 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageCASharpening+" + }, + "widgets_values": [ + 0.8 + ] + }, + { + "id": 15, + "type": "MaskBlur+", + "pos": [ + 1690, + 130 + ], + "size": { + "0": 310, + "1": 82 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 14 + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 19 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MaskBlur+" + }, + "widgets_values": [ + 45, + 28.5 + ] + }, + { + "id": 16, + "type": "MaskFlip+", + "pos": [ + 1690, + 270 + ], + "size": { + "0": 310, + "1": 60 + }, + 
"flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 15 + } + ], + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 18 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "MaskFlip+" + }, + "widgets_values": [ + "xy" + ] + }, + { + "id": 13, + "type": "PreviewImage", + "pos": [ + 1100, + 760 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 49 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 37, + "type": "ImageDesaturate+", + "pos": [ + 500, + 920 + ], + "size": { + "0": 190, + "1": 30 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 48 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 49 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageDesaturate+" + } + }, + { + "id": 7, + "type": "LoadImage", + "pos": [ + -90, + 650 + ], + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 0, + "mode": 0, + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 1, + 4, + 6, + 8, + 22, + 48, + 57 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "venere.jpg", + "image" + ] + }, + { + "id": 11, + "type": "PreviewImage", + "pos": [ + 1100, + 450 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 58 + } + ], + "properties": { + "Node name for S&R": "PreviewImage" + } + }, + { + "id": 40, + "type": "ImageCrop+", + "pos": [ + 430, + 560 + ], + "size": { + "0": 310, + "1": 194 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 57 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 58 + ], + "shape": 3, + "slot_index": 0 + }, + { + "name": "x", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "y", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageCrop+" + }, + "widgets_values": [ + 256, + 256, + "center", + 0, + 0 + ] + }, + { + "id": 20, + "type": "LoadImageMask", + "pos": [ + 1400, + 260 + ], + "size": { + "0": 220.70516967773438, + "1": 318 + }, + "flags": {}, + "order": 1, + "mode": 0, + "outputs": [ + { + "name": "MASK", + "type": "MASK", + "links": [ + 14, + 15 + ], + "shape": 3, + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "LoadImageMask" + }, + "widgets_values": [ + "cwf_inpaint_example_mask.png", + "alpha", + "image" + ] + }, + { + "id": 21, + "type": "MaskPreview+", + "pos": [ + 2100, + 380 + ], + "size": { + "0": 210, + "1": 246 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 18 + } + ], + "properties": { + "Node name for S&R": "MaskPreview+" + } + } + ], + "links": [ + [ + 1, + 7, + 0, + 1, + 0, + "IMAGE" + ], + [ + 2, + 1, + 0, + 8, + 0, + "*" + ], + [ + 3, + 1, + 1, + 9, + 0, + "*" + ], + [ + 4, + 7, + 0, + 2, + 0, + "IMAGE" + ], + [ + 6, + 7, + 0, + 4, + 0, + "IMAGE" + ], + [ + 8, + 7, + 0, + 6, + 0, + "IMAGE" + ], + [ + 9, + 2, + 0, + 10, + 0, + "IMAGE" + ], + [ + 11, + 4, + 0, + 12, + 0, + 
"IMAGE" + ], + [ + 13, + 6, + 0, + 14, + 0, + "IMAGE" + ], + [ + 14, + 20, + 0, + 15, + 0, + "MASK" + ], + [ + 15, + 20, + 0, + 16, + 0, + "MASK" + ], + [ + 18, + 16, + 0, + 21, + 0, + "MASK" + ], + [ + 19, + 15, + 0, + 18, + 0, + "MASK" + ], + [ + 22, + 7, + 0, + 27, + 0, + "IMAGE" + ], + [ + 23, + 27, + 0, + 28, + 0, + "IMAGE" + ], + [ + 44, + 2, + 1, + 36, + 0, + "INT,FLOAT" + ], + [ + 45, + 2, + 2, + 36, + 1, + "INT,FLOAT" + ], + [ + 46, + 36, + 0, + 23, + 0, + "*" + ], + [ + 48, + 7, + 0, + 37, + 0, + "IMAGE" + ], + [ + 49, + 37, + 0, + 13, + 0, + "IMAGE" + ], + [ + 57, + 7, + 0, + 40, + 0, + "IMAGE" + ], + [ + 58, + 40, + 0, + 11, + 0, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": {}, + "version": 0.4 +} \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 064605906b0a6f56e34d41ea48005a76c59b455c..49353a9153033d0b6e9e8f33ff6b7b4a1733e572 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,35 +1,39 @@ -comfyui-frontend-package==1.14.5 -torch -torchsde -torchvision -torchaudio -numpy>=1.25.0 -einops -transformers>=4.28.1 -tokenizers>=0.13.3 -sentencepiece -safetensors>=0.4.2 aiohttp>=3.11.8 -yarl>=1.18.0 -pyyaml -Pillow -scipy -tqdm -psutil - -#non essential dependencies: -kornia>=0.7.1 -spandrel -soundfile -av - albumentations>=1.4.16 +av +color-matcher +colour-science +comfyui-frontend-package==1.14.5 dill +einops gradio huggingface_hub insightface==0.7.3 +kornia>=0.7.1 +matplotlib +mss numba +numpy>=1.25.0 onnx>=1.14.0 opencv-python>=4.7.0.72 piexif -ultralytics \ No newline at end of file +pillow>=10.3.0 +pixeloe +psutil +pyyaml +rembg +safetensors>=0.4.2 +scipy +sentencepiece +soundfile +spandrel +tokenizers>=0.13.3 +torch +torchaudio +torchsde +torchvision +tqdm +transformers>=4.28.1 +transparent-background +ultralytics +yarl>=1.18.0 \ No newline at end of file