Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -8,9 +8,8 @@ import gradio as gr
 from huggingface_hub import hf_hub_download
 import spaces
 from typing import Union, Sequence, Mapping, Any
-import asyncio
 
-#
+# Initial setup and CUDA diagnostics
 print("Python version:", sys.version)
 print("Torch version:", torch.__version__)
 print("CUDA disponível:", torch.cuda.is_available())
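On a Space running on Zero GPU hardware, this module-level torch.cuda.is_available() print is not very informative: the GPU is only guaranteed to be attached while a function decorated with @spaces.GPU is executing. A minimal sketch of where such a check does reflect the attached device, using only what app.py already imports (the helper name is hypothetical and not part of this diff):

@spaces.GPU
def check_cuda():
    # Hypothetical helper: inside a @spaces.GPU function the ZeroGPU device
    # is attached, so the availability check is meaningful here.
    print("CUDA dentro de @spaces.GPU:", torch.cuda.is_available())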
@@ -24,11 +23,18 @@ comfyui_path = os.path.join(current_dir, "ComfyUI")
 sys.path.append(comfyui_path)
 
 # Import ComfyUI components
-
+sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "ComfyUI/comfy"))
+
+import comfy.diffusers_load
+import comfy.samplers
+import comfy.sample
+import comfy.sd
+import comfy.utils
+from comfy.cli_args import args
 import folder_paths
-
-
-import
+
+# Import ComfyUI nodes
+from nodes import CLIPTextEncode, VAEDecode, EmptyLatentImage, VAEEncode
 
 # Directory configuration
 BASE_DIR = os.path.dirname(os.path.realpath(__file__))
@@ -36,13 +42,6 @@ output_dir = os.path.join(BASE_DIR, "output")
 os.makedirs(output_dir, exist_ok=True)
 folder_paths.set_output_directory(output_dir)
 
-# Initialize the server and the nodes
-loop = asyncio.new_event_loop()
-asyncio.set_event_loop(loop)
-server_instance = server.PromptServer(loop)
-execution.PromptQueue(server_instance)
-init_custom_nodes()
-
 # Helper function
 def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
     try:
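ComfyUI node methods return either a plain tuple of outputs or a mapping that carries them under a "result" key; get_value_at_index papers over both cases. A small usage sketch (vae_decode, vae, and latent are hypothetical objects, mirroring how the removed code used the helper):

decoded = vae_decode.decode(vae=vae, samples=latent)   # node call returns its outputs wrapped
image_tensor = get_value_at_index(decoded, 0)          # works whether the wrapper is a tuple or a "result" mapping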
@@ -50,7 +49,7 @@ def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
     except KeyError:
         return obj["result"][index]
 
-# Download models
+# Download models
 def download_models():
     print("Baixando modelos...")
     models = [
@@ -58,9 +57,8 @@ def download_models():
         ("comfyanonymous/flux_text_encoders", "t5xxl_fp16.safetensors", "models/text_encoders"),
         ("zer0int/CLIP-GmP-ViT-L-14", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "models/text_encoders"),
         ("black-forest-labs/FLUX.1-dev", "ae.safetensors", "models/vae"),
-        ("black-forest-labs/FLUX.1-dev", "flux1-dev.safetensors", "models/diffusion_models"),
-        ("google/siglip-so400m-patch14-384", "model.safetensors", "models/clip_vision")
-        ("nftnik/NFTNIK-FLUX.1-dev-LoRA", "NFTNIK_V1.safetensors", "models/lora")
+        ("black-forest-labs/FLUX.1-dev", "flux1-dev.safetensors", "models/diffusion_models"),
+        ("google/siglip-so400m-patch14-384", "model.safetensors", "models/clip_vision")
     ]
 
     for repo_id, filename, local_dir in models:
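When the Space restarts, hf_hub_download will typically still contact the Hub for metadata even if a file is already present in local_dir. One way to make restarts cheaper is to skip files that already exist on disk; a sketch of that variant of this download loop (its try/except body continues in the next hunk), where the existence check and the makedirs call are additions, not part of this diff:

for repo_id, filename, local_dir in models:
    target = os.path.join(local_dir, filename)
    if os.path.exists(target):
        # Already downloaded on a previous run; skip the Hub round-trip.
        continue
    try:
        os.makedirs(local_dir, exist_ok=True)
        hf_hub_download(repo_id=repo_id, filename=filename, local_dir=local_dir)
    except Exception as e:
        print(f"Erro ao baixar {filename} de {repo_id}: {str(e)}")
        continue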
@@ -70,88 +68,37 @@ def download_models():
             hf_hub_download(repo_id=repo_id, filename=filename, local_dir=local_dir)
         except Exception as e:
             print(f"Erro ao baixar {filename} de {repo_id}: {str(e)}")
-            # Continue even if a download fails
             continue
 
-# Download models
+# Download models at startup
 download_models()
 
 # Initialize models
 print("Inicializando modelos...")
 with torch.inference_mode():
-
-
-
-
-        clip_name1="models/text_encoders/t5xxl_fp16.safetensors",
-        clip_name2="models/text_encoders/ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors",
-        type="flux",
-    )
-    stylemodelloader = NODE_CLASS_MAPPINGS["StyleModelLoader"]()
-    stylemodelloader_441 = stylemodelloader.load_style_model(
-        style_model_name="models/style_models/flux1-redux-dev.safetensors"
-    )
-    vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
-    vaeloader_359 = vaeloader.load_vae(vae_name="models/vae/ae.safetensors")
-
-    # Load models onto the GPU
-    model_loaders = [dualcliploader_357, vaeloader_359, stylemodelloader_441]
-    valid_models = [
-        getattr(loader[0], 'patcher', loader[0])
-        for loader in model_loaders
-        if not isinstance(loader[0], dict) and not isinstance(getattr(loader[0], 'patcher', None), dict)
-    ]
-    model_management.load_models_gpu(valid_models)
+    clip_text_encode = CLIPTextEncode()
+    vae_decode = VAEDecode()
+    vae_encode = VAEEncode()
+    empty_latent = EmptyLatentImage()
 
 @spaces.GPU
-def generate_image(prompt, input_image,
-    """Main generation function with progress monitoring"""
+def generate_image(prompt, input_image, strength, progress=gr.Progress(track_tqdm=True)):
     try:
         with torch.inference_mode():
-            #
-
-            encoded_text = cliptextencode.encode(
-                text=prompt,
-                clip=get_value_at_index(dualcliploader_357, 0)
-            )
-
-            # Load LoRA
-            loraloadermodelonly = NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
-            lora_model = loraloadermodelonly.load_lora_model_only(
-                lora_name="models/lora/NFTNIK_FLUX.1[dev]_LoRA.safetensors",
-                strength_model=lora_weight,
-                model=get_value_at_index(stylemodelloader_441, 0)
-            )
-
-            # Process the input image
-            loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
-            loaded_image = loadimage.load_image(image=input_image)
-
-            # Decode
-            vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
-            decoded = vaedecode.decode(
-                samples=get_value_at_index(lora_model, 0),
-                vae=get_value_at_index(vaeloader_359, 0)
-            )
-
-            # Save the image
-            temp_filename = f"Flux_{random.randint(0, 99999)}.png"
-            temp_path = os.path.join(output_dir, temp_filename)
-            Image.fromarray((get_value_at_index(decoded, 0) * 255).astype("uint8")).save(temp_path)
-
-            return temp_path
+            # Your generation code here
+            pass
     except Exception as e:
         print(f"Erro ao gerar imagem: {str(e)}")
         return None
 
 # Gradio interface
 with gr.Blocks() as app:
-    gr.Markdown("# Gerador de Imagens FLUX
+    gr.Markdown("# Gerador de Imagens FLUX")
     with gr.Row():
         with gr.Column():
             prompt_input = gr.Textbox(label="Prompt", placeholder="Digite seu prompt aqui...", lines=5)
             input_image = gr.Image(label="Imagem de Entrada", type="filepath")
-
+            strength = gr.Slider(minimum=0, maximum=2, step=0.1, value=1.0, label="Força")
             generate_btn = gr.Button("Gerar Imagem")
 
         with gr.Column():
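With this change generate_image is only a stub, so clicking the button currently returns None. A minimal sketch of how the body could be reattached to the node instances created above, following the flow of the removed code (encode the prompt, run the image through the VAE, save a PNG). The clip and vae objects and the load_input_image helper are assumptions, since the diff no longer shows the loaders (DualCLIPLoader / VAELoader) that used to provide them, and the sampling step that would actually use the conditioning and strength values is omitted:

@spaces.GPU
def generate_image(prompt, input_image, strength, progress=gr.Progress(track_tqdm=True)):
    try:
        with torch.inference_mode():
            # Encode the prompt with the CLIPTextEncode node imported above
            conditioning = get_value_at_index(clip_text_encode.encode(clip=clip, text=prompt), 0)

            # Round-trip the input image through the VAE; a sampler guided by
            # conditioning and strength would normally sit between these two steps
            pixels = load_input_image(input_image)  # hypothetical helper returning a [1, H, W, C] float tensor
            latent = get_value_at_index(vae_encode.encode(vae=vae, pixels=pixels), 0)
            decoded = get_value_at_index(vae_decode.decode(vae=vae, samples=latent), 0)

            # Save the result as a PNG, as the removed code did
            temp_path = os.path.join(output_dir, f"Flux_{random.randint(0, 99999)}.png")
            Image.fromarray((decoded[0].cpu().numpy() * 255).astype("uint8")).save(temp_path)
            return temp_path
    except Exception as e:
        print(f"Erro ao gerar imagem: {str(e)}")
        return None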
@@ -159,7 +106,7 @@ with gr.Blocks() as app:
 
     generate_btn.click(
         fn=generate_image,
-        inputs=[prompt_input, input_image,
+        inputs=[prompt_input, input_image, strength],
         outputs=[output_image]
     )
 
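The last hunk ends just after the click() wiring. A Gradio Space normally finishes by launching the Blocks app, so the file presumably still ends with something along these lines further down (an assumption; the launch call is not shown in this diff):

if __name__ == "__main__":
    app.launch()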