# Spaces: Running on Zero
import os | |
import sys | |
import random | |
from typing import Sequence, Mapping, Any, Union | |
import torch | |
import gradio as gr | |
from PIL import Image | |
from huggingface_hub import hf_hub_download | |
#####################################
# 1. Path and import helper functions
#####################################
def find_path(name: str, path: str = None) -> str:
    """Walk upward from *path* (default: cwd) looking for an entry *name*.

    Checks each directory on the way to the filesystem root and returns
    the full path of the first match, or ``None`` when the root is
    reached without finding it.
    """
    current = os.getcwd() if path is None else path
    while True:
        if name in os.listdir(current):
            found = os.path.join(current, name)
            print(f"{name} encontrado em: {found}")
            return found
        parent = os.path.dirname(current)
        if parent == current:  # reached the filesystem root — give up
            return None
        current = parent
def add_comfyui_directory_to_sys_path() -> None:
    """Locate the ComfyUI checkout and append it to ``sys.path``.

    Uses :func:`find_path` to search upward from the current working
    directory; prints a status message either way.
    """
    comfyui_path = find_path("ComfyUI")
    if comfyui_path is not None and os.path.isdir(comfyui_path):
        sys.path.append(comfyui_path)
        # Fix: the status strings were mojibake (UTF-8 Portuguese decoded
        # as GBK); restored to the intended Portuguese text.
        print(f"Diretório ComfyUI adicionado ao sys.path: {comfyui_path}")
    else:
        print("Não foi possível encontrar o diretório ComfyUI.")
def import_custom_nodes() -> None:
    """Initialise ComfyUI's extra/custom nodes without starting its server."""
    # Deferred import: `nodes` only resolves after the ComfyUI directory
    # has been appended to sys.path by add_comfyui_directory_to_sys_path().
    from nodes import init_extra_nodes

    init_extra_nodes()
#####################################
# 2. Environment setup
#####################################
# Order matters: the ComfyUI directory must be on sys.path before the
# custom nodes (and the imports below) can be resolved.
add_comfyui_directory_to_sys_path()
import_custom_nodes()
#####################################
# 3. ComfyUI node imports (must run after the sys.path setup above)
#####################################
from comfy import model_management
from nodes import (
    NODE_CLASS_MAPPINGS,
    CLIPTextEncode,
    CLIPVisionLoader,
    DualCLIPLoader,
    EmptyLatentImage,
    LoadImage,
    StyleModelLoader,
    VAEDecode,
    VAELoader,
)
#####################################
# 4. Model downloads (adjust to your needs)
#####################################
# Make sure every model sub-directory exists before downloading into it.
for _model_dir in (
    "models/text_encoders",
    "models/style_models",
    "models/diffusion_models",
    "models/vae",
    "models/clip_vision",
):
    os.makedirs(_model_dir, exist_ok=True)

# (repo_id, filename, target directory) for each required checkpoint.
_MODEL_DOWNLOADS = [
    ("black-forest-labs/FLUX.1-Redux-dev", "flux1-redux-dev.safetensors", "models/style_models"),
    ("comfyanonymous/flux_text_encoders", "t5xxl_fp16.safetensors", "models/text_encoders"),
    ("zer0int/CLIP-GmP-ViT-L-14", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "models/text_encoders"),
    ("black-forest-labs/FLUX.1-dev", "ae.safetensors", "models/vae"),
    ("black-forest-labs/FLUX.1-dev", "flux1-dev.safetensors", "models/diffusion_models"),
    ("google/siglip-so400m-patch14-384", "model.safetensors", "models/clip_vision"),
]

# Single try around the whole loop: as before, the first failure aborts
# the remaining downloads and the app keeps booting best-effort.
try:
    print("Baixando modelos...")
    for _repo, _file, _dest in _MODEL_DOWNLOADS:
        hf_hub_download(repo_id=_repo, filename=_file, local_dir=_dest)
except Exception as e:
    print("Erro ao baixar modelos:", e)
#####################################
# 5. Loading the ComfyUI models
#####################################
# Instantiate the loader nodes once at start-up; the resulting globals
# (clip_model, clip_vision_model, style_model, vae_model) are read later
# by generate_image, so their names must not change.
dualcliploader = DualCLIPLoader()
clip_model = dualcliploader.load_clip(
    clip_name1="t5xxl_fp16.safetensors",
    clip_name2="ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors",
    type="flux",
)

clipvisionloader = CLIPVisionLoader()
clip_vision_model = clipvisionloader.load_clip(clip_name="model.safetensors")

stylemodelloader = StyleModelLoader()
style_model = stylemodelloader.load_style_model(style_model_name="flux1-redux-dev.safetensors")

vaeloader = VAELoader()
vae_model = vaeloader.load_vae(vae_name="ae.safetensors")

# Push everything onto the GPU up front so per-request latency stays low.
model_management.load_models_gpu(
    [clip_model[0], clip_vision_model[0], style_model[0], vae_model[0]]
)
#####################################
# 6. Image generation function
#####################################
def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Fetch element *index* from a ComfyUI node result.

    Node outputs are either directly indexable (tuple/list) or a mapping
    whose payload lives under the ``"result"`` key; fall back to the
    latter when the direct lookup raises ``KeyError``.
    """
    try:
        value = obj[index]
    except KeyError:
        value = obj["result"][index]
    return value
def generate_image(
    prompt: str,
    input_image_path: str,
    guidance: float,
    downsampling_factor: float,
    weight: float,
    seed: int,
    width: int,
    height: int,
    steps: int,
    progress=gr.Progress(track_tqdm=True),
):
    """Generate a styled image with the ComfyUI FLUX Redux pipeline.

    Args:
        prompt: Text prompt to encode and condition on.
        input_image_path: Path of the reference image for the style model.
        guidance: FLUX guidance strength.
        downsampling_factor: ReduxAdvanced downsampling factor.
        weight: Style-model weight.
        seed: Seed for torch and random (reproducibility).
        width, height, steps: Accepted for UI compatibility.
        progress: Gradio progress tracker (tqdm-linked).

    Returns:
        Path of the saved PNG on success, or ``None`` on failure.
    """
    try:
        # Seed both torch and random so runs are reproducible.
        torch.manual_seed(seed)
        random.seed(seed)

        # NOTE(review): `width`, `height` and `steps` are never used — the
        # visible pipeline has no sampling stage (no EmptyLatentImage /
        # KSampler is wired in). TODO: wire in sampling or drop these
        # controls from the UI.

        # Encode the text prompt with the shared CLIP model.
        cliptextencode = CLIPTextEncode()
        encoded_text = cliptextencode.encode(
            text=prompt,
            clip=get_value_at_index(clip_model, 0),
        )

        # Load the reference image from disk.
        loadimage = LoadImage()
        loaded_image = loadimage.load_image(image=input_image_path)

        # Apply FLUX guidance to the text conditioning.
        fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
        flux_guided = fluxguidance.append(
            guidance=guidance,
            conditioning=get_value_at_index(encoded_text, 0),
        )

        # Blend in the Redux style model using the reference image.
        reduxadvanced = NODE_CLASS_MAPPINGS["ReduxAdvanced"]()
        styled_image = reduxadvanced.apply_stylemodel(
            downsampling_factor=downsampling_factor,
            downsampling_function="area",
            mode="keep aspect ratio",
            weight=weight,
            conditioning=get_value_at_index(flux_guided, 0),
            style_model=get_value_at_index(style_model, 0),
            clip_vision=get_value_at_index(clip_vision_model, 0),
            image=get_value_at_index(loaded_image, 0),
        )

        # Decode to pixels with the VAE.
        vaedecode = VAEDecode()
        decoded_image = vaedecode.decode(
            samples=get_value_at_index(styled_image, 0),
            vae=get_value_at_index(vae_model, 0),
        )

        # Save the image.
        output_dir = "output"
        os.makedirs(output_dir, exist_ok=True)
        output_path = os.path.join(output_dir, f"generated_{random.randint(1, 99999)}.png")

        # BUG FIX: the original did `(decoded_image[0] * 255).astype("uint8")`
        # directly on a torch tensor — torch tensors have no `.astype`, and
        # the batch dimension was never dropped, so Image.fromarray would
        # fail. Unwrap the node result, drop the batch dim, move to CPU /
        # NumPy, clamp to [0, 1] and only then convert to uint8.
        image_tensor = get_value_at_index(decoded_image, 0)[0]  # presumably (H, W, C) floats in [0, 1] — TODO confirm
        pixels = (image_tensor.cpu().numpy().clip(0.0, 1.0) * 255).astype("uint8")
        Image.fromarray(pixels).save(output_path)
        return output_path
    except Exception as e:
        # Best-effort: surface the error in the log and let the UI show an
        # empty output instead of crashing the app.
        print("Erro ao gerar imagem:", e)
        return None
#####################################
# 7. Gradio interface
#####################################
with gr.Blocks() as app:
    gr.Markdown("# FLUX Redux Image Generator")

    with gr.Row():
        with gr.Column():
            # Generation controls (left column).
            prompt_box = gr.Textbox(label="Prompt", placeholder="Escreva seu prompt...", lines=3)
            source_image = gr.Image(label="Imagem de Entrada", type="filepath")
            guidance_ctl = gr.Slider(minimum=0, maximum=20, step=0.1, value=3.5, label="Guidance")
            downsampling_ctl = gr.Slider(minimum=1, maximum=8, step=1, value=3, label="Downsampling Factor")
            weight_ctl = gr.Slider(minimum=0, maximum=2, step=0.1, value=1.0, label="Peso do Estilo")
            seed_ctl = gr.Number(label="Seed", value=random.randint(1, 2**32), precision=0)
            width_ctl = gr.Number(label="Largura", value=512, precision=0)
            height_ctl = gr.Number(label="Altura", value=512, precision=0)
            steps_ctl = gr.Number(label="Passos", value=50, precision=0)
            run_button = gr.Button("Gerar Imagem")
        with gr.Column():
            # Result display (right column).
            result_image = gr.Image(label="Imagem Gerada")

    # Wire the button to the generation pipeline; input order must match
    # generate_image's positional parameters.
    run_button.click(
        fn=generate_image,
        inputs=[
            prompt_box, source_image, guidance_ctl,
            downsampling_ctl, weight_ctl,
            seed_ctl, width_ctl, height_ctl, steps_ctl,
        ],
        outputs=[result_image],
    )

if __name__ == "__main__":
    app.launch()