import os
import sys
import random
import torch
from pathlib import Path
from PIL import Image
import gradio as gr
from huggingface_hub import hf_hub_download
import spaces
from typing import Union, Sequence, Mapping, Any

# Initial setup and CUDA diagnostics
print("Python version:", sys.version)
print("Torch version:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())
print("GPU count:", torch.cuda.device_count())
if torch.cuda.is_available():
    print("Current GPU:", torch.cuda.get_device_name(0))

# Add the ComfyUI folder to sys.path
current_dir = os.path.dirname(os.path.abspath(__file__))
comfyui_path = os.path.join(current_dir, "ComfyUI")
sys.path.append(comfyui_path)

# Import ComfyUI components (also expose ComfyUI/comfy on sys.path)
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "ComfyUI/comfy"))

import comfy.diffusers_load
import comfy.samplers
import comfy.sample
import comfy.sd
import comfy.utils
from comfy.cli_args import args
import folder_paths

# Import ComfyUI nodes
from nodes import CLIPTextEncode, VAEDecode, EmptyLatentImage, VAEEncode

# Directory configuration
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
output_dir = os.path.join(BASE_DIR, "output")
os.makedirs(output_dir, exist_ok=True)
folder_paths.set_output_directory(output_dir)

# Helper: ComfyUI node methods return a tuple (or a dict with a "result" key); fetch the element at the given index
def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    try:
        return obj[index]
    except KeyError:
        return obj["result"][index]

# Download models
def download_models():
    print("Downloading models...")
    models = [
        ("black-forest-labs/FLUX.1-Redux-dev", "flux1-redux-dev.safetensors", "models/style_models"),
        ("comfyanonymous/flux_text_encoders", "t5xxl_fp16.safetensors", "models/text_encoders"),
        ("zer0int/CLIP-GmP-ViT-L-14", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "models/text_encoders"),
        ("black-forest-labs/FLUX.1-dev", "ae.safetensors", "models/vae"),
        ("black-forest-labs/FLUX.1-dev", "flux1-dev.safetensors", "models/diffusion_models"),
        ("google/siglip-so400m-patch14-384", "model.safetensors", "models/clip_vision")
    ]
    
    for repo_id, filename, local_dir in models:
        try:
            os.makedirs(local_dir, exist_ok=True)
            print(f"Baixando {filename} de {repo_id}...")
            hf_hub_download(repo_id=repo_id, filename=filename, local_dir=local_dir)
        except Exception as e:
            print(f"Erro ao baixar {filename} de {repo_id}: {str(e)}")
            continue

# Download models at startup
download_models()

# Instantiate ComfyUI nodes
print("Initializing ComfyUI nodes...")
with torch.inference_mode():
    clip_text_encode = CLIPTextEncode()
    vae_decode = VAEDecode()
    vae_encode = VAEEncode()
    empty_latent = EmptyLatentImage()
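
# Illustrative only: the block above instantiates encode/decode nodes but never loads the
# checkpoints fetched by download_models(). The helper below is a minimal sketch of how the
# FLUX components could be loaded with stock ComfyUI loader nodes; the node names and method
# signatures are assumptions, and the helper is not called anywhere in this script.
def _example_load_flux_models():
    from nodes import UNETLoader, DualCLIPLoader, VAELoader  # stock ComfyUI loader nodes
    with torch.inference_mode():
        # Assumes the files downloaded above are discoverable by folder_paths
        unet = get_value_at_index(
            UNETLoader().load_unet("flux1-dev.safetensors", "default"), 0)
        clip = get_value_at_index(
            DualCLIPLoader().load_clip("t5xxl_fp16.safetensors",
                                       "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors",
                                       "flux"), 0)
        vae = get_value_at_index(VAELoader().load_vae("ae.safetensors"), 0)
        return unet, clip, vae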

@spaces.GPU
def generate_image(prompt, input_image, strength, progress=gr.Progress(track_tqdm=True)):
    try:
        with torch.inference_mode():
            # Generation code goes here (see the hedged sketch after this function)
            pass
    except Exception as e:
        print(f"Erro ao gerar imagem: {str(e)}")
        return None
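
# Illustrative only: a minimal sketch of what the generation step inside generate_image()
# could look like, assuming models loaded as in _example_load_flux_models() above and
# ComfyUI's stock KSampler node. Parameter values are placeholders and this helper is not
# wired into the Gradio app.
def _example_generate(model, clip, vae, prompt, seed=0):
    from nodes import KSampler  # stock ComfyUI sampler node
    with torch.inference_mode():
        positive = get_value_at_index(clip_text_encode.encode(clip, prompt), 0)
        negative = get_value_at_index(clip_text_encode.encode(clip, ""), 0)
        latent = get_value_at_index(empty_latent.generate(1024, 1024, 1), 0)
        samples = get_value_at_index(
            KSampler().sample(model, seed, 20, 1.0, "euler", "simple",
                              positive, negative, latent, denoise=1.0), 0)
        images = get_value_at_index(vae_decode.decode(vae, samples), 0)
        # images is a [B, H, W, C] float tensor in 0..1; convert to PIL before handing a
        # filepath to Gradio, e.g. Image.fromarray((images[0].cpu().numpy() * 255).astype("uint8"))
        return images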

# Gradio interface
with gr.Blocks() as app:
    gr.Markdown("# FLUX Image Generator")
    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(label="Prompt", placeholder="Type your prompt here...", lines=5)
            input_image = gr.Image(label="Input Image", type="filepath")
            strength = gr.Slider(minimum=0, maximum=2, step=0.1, value=1.0, label="Strength")
            generate_btn = gr.Button("Generate Image")

        with gr.Column():
            output_image = gr.Image(label="Generated Image", type="filepath")

    generate_btn.click(
        fn=generate_image,
        inputs=[prompt_input, input_image, strength],
        outputs=[output_image]
    )

if __name__ == "__main__":
    app.launch()