nftnik committed on
Commit 0c0098b · verified · 1 Parent(s): ed9bd08

Update app.py

Files changed (1)
  1. app.py +81 -110
app.py CHANGED
@@ -1,25 +1,29 @@
  import os
+ import sys
  import random
  import torch
  from pathlib import Path
  from PIL import Image
  import gradio as gr
  from huggingface_hub import hf_hub_download
- import sys
+ import spaces
+ from typing import Union, Sequence, Mapping, Any

- # 1. Configuração de Caminhos
+ # 1. Configuração de Caminhos e Imports
  current_dir = os.path.dirname(os.path.abspath(__file__))
  comfyui_path = os.path.join(current_dir, "ComfyUI")
  sys.path.append(comfyui_path)

  # 2. Imports do ComfyUI
- from nodes import NODE_CLASS_MAPPINGS
  import folder_paths
+ from nodes import NODE_CLASS_MAPPINGS, init_extra_nodes

  # 3. Configuração de Diretórios
  BASE_DIR = os.path.dirname(os.path.realpath(__file__))
  output_dir = os.path.join(BASE_DIR, "output")
+ models_dir = os.path.join(BASE_DIR, "models")
  os.makedirs(output_dir, exist_ok=True)
+ os.makedirs(models_dir, exist_ok=True)
  folder_paths.set_output_directory(output_dir)

  # 4. Diagnóstico CUDA
@@ -29,102 +33,90 @@ print("CUDA disponível:", torch.cuda.is_available())
  print("Quantidade de GPUs:", torch.cuda.device_count())
  if torch.cuda.is_available():
      print("GPU atual:", torch.cuda.get_device_name(0))
- else:
-     print("GPU não disponível. Usando CPU.")

- # 5. Download de Modelos
+ # 5. Inicialização do ComfyUI
+ print("Inicializando ComfyUI...")
+ init_extra_nodes()
+
+ # 6. Helper Functions
+ def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
+     try:
+         return obj[index]
+     except KeyError:
+         return obj["result"][index]
+
+ # 7. Download de Modelos
  def download_models():
+     print("Baixando modelos...")
      models = [
          ("black-forest-labs/FLUX.1-Redux-dev", "flux1-redux-dev.safetensors", "style_models"),
          ("comfyanonymous/flux_text_encoders", "t5xxl_fp16.safetensors", "text_encoders"),
-         ("zer0int/CLIP-GmP-ViT-L-14", "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors", "text_encoders"),
+         ("zer0int/CLIP-GmP-ViT-L-14", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "text_encoders"),
          ("black-forest-labs/FLUX.1-dev", "ae.safetensors", "vae"),
-         ("black-forest-labs/FLUX.1-dev", "flux1-dev.sft", "diffusion_models"),
-         ("google/siglip-so400m-patch14-384", "model.safetensors", "clip_vision"),
-         ("black-forest-labs/FLUX.1-Redux-dev", "NFTNIK_FLUX.1[dev]_LoRA.safetensors", "lora")
+         ("black-forest-labs/FLUX.1-dev", "flux1-dev.safetensors", "diffusion_models"),
+         ("google/siglip-so400m-patch14-384", "model.safetensors", "clip_vision")
      ]

      for repo_id, filename, model_type in models:
-         model_dir = os.path.join(BASE_DIR, "models", model_type)
-         os.makedirs(model_dir, exist_ok=True)
-         print(f"Baixando {filename} de {repo_id}...")
-         hf_hub_download(repo_id=repo_id, filename=filename, local_dir=model_dir)
-         folder_paths.add_model_folder_path(model_type, model_dir)
+         try:
+             model_dir = os.path.join(models_dir, model_type)
+             os.makedirs(model_dir, exist_ok=True)
+             print(f"Baixando {filename} de {repo_id}...")
+             hf_hub_download(repo_id=repo_id, filename=filename, local_dir=model_dir)
+             # Adicionar o diretório ao folder_paths
+             folder_paths.add_model_folder_path(model_type, model_dir)
+         except Exception as e:
+             print(f"Erro ao baixar {filename} de {repo_id}: {str(e)}")
+             continue

- # 6. Load custom nodes
- def import_custom_nodes():
-     import asyncio
-     import execution
-     from nodes import init_extra_nodes
-     import server
+ # 8. Download e Inicialização dos Modelos
+ print("Baixando modelos...")
+ download_models()

-     loop = asyncio.new_event_loop()
-     asyncio.set_event_loop(loop)
+ print("Inicializando modelos...")
+ with torch.inference_mode():
+     # CLIP
+     dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
+     dualcliploader_357 = dualcliploader.load_clip(
+         clip_name1="t5xxl_fp16.safetensors",
+         clip_name2="ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors",
+         type="flux"
+     )

-     server_instance = server.PromptServer(loop)
-     execution.PromptQueue(server_instance)
-     init_extra_nodes()
+     # CLIP Vision
+     clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
+     clip_vision = clipvisionloader.load_clip(
+         clip_name="model.safetensors"
+     )

- # 7. Main function to execute the workflow and generate an image
- def generate_image(prompt, input_image, lora_weight, guidance, downsampling_factor, weight, seed, width, height, batch_size, steps):
-     import_custom_nodes()
-
-     try:
-         with torch.inference_mode():
-             device = "cuda" if torch.cuda.is_available() else "cpu"
-             print(f"Using device: {device}")
+     # Style Model
+     stylemodelloader = NODE_CLASS_MAPPINGS["StyleModelLoader"]()
+     stylemodelloader_441 = stylemodelloader.load_style_model(
+         style_model_name="flux1-redux-dev.safetensors"
+     )

-             # Load CLIP
-             dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
-             dualcliploader_loaded = dualcliploader.load_clip(
-                 clip_name1="t5xxl_fp16.safetensors",
-                 clip_name2="ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors",
-                 type="flux",
-                 device=device
-             )
+     # VAE
+     vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
+     vaeloader_359 = vaeloader.load_vae(
+         vae_name="ae.safetensors"
+     )

-             # Text Encoding
+ # 9. Função de Geração
+ @spaces.GPU
+ def generate_image(prompt, input_image, lora_weight, guidance, downsampling_factor, weight, seed, width, height, batch_size, steps, progress=gr.Progress(track_tqdm=True)):
+     try:
+         with torch.inference_mode():
+             # Codificar texto
              cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
              encoded_text = cliptextencode.encode(
                  text=prompt,
-                 clip=dualcliploader_loaded[0]
+                 clip=dualcliploader_357[0]
              )

-             # Load Style Model
-             stylemodelloader = NODE_CLASS_MAPPINGS["StyleModelLoader"]()
-             style_model = stylemodelloader.load_style_model(
-                 style_model_name="flux1-redux-dev.safetensors"
-             )
-
-             # Load CLIP Vision
-             clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
-             clip_vision = clipvisionloader.load_clip(
-                 clip_name="model.safetensors"
-             )
-
-             # Load Input Image
+             # Carregar e processar imagem
              loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
              loaded_image = loadimage.load_image(image=input_image)

-             # Load VAE
-             vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
-             vae = vaeloader.load_vae(vae_name="ae.safetensors")
-
-             # Load UNET
-             unetloader = NODE_CLASS_MAPPINGS["UNETLoader"]()
-             unet = unetloader.load_unet(
-                 unet_name="flux1-dev.sft",
-                 weight_dtype="fp8_e4m3fn"
-             )
-
-             # Load LoRA
-             loraloadermodelonly = NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
-             lora_model = loraloadermodelonly.load_lora_model_only(
-                 lora_name="NFTNIK_FLUX.1[dev]_LoRA.safetensors",
-                 strength_model=lora_weight,
-                 model=unet[0]
-             )
-
              # Flux Guidance
              fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
              flux_guidance = fluxguidance.append(
@@ -139,14 +131,13 @@ def generate_image(prompt, input_image, lora_weight, guidance, downsampling_fact
                  downsampling_function="area",
                  mode="keep aspect ratio",
                  weight=weight,
-                 autocrop_margin=0.1,
                  conditioning=flux_guidance[0],
-                 style_model=style_model[0],
+                 style_model=stylemodelloader_441[0],
                  clip_vision=clip_vision[0],
                  image=loaded_image[0]
              )

-             # Empty Latent Image
+             # Empty Latent
              emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
              empty_latent = emptylatentimage.generate(
                  width=width,
@@ -163,49 +154,30 @@ def generate_image(prompt, input_image, lora_weight, guidance, downsampling_fact
                  sampler_name="euler",
                  scheduler="simple",
                  denoise=1,
-                 model=lora_model[0],
+                 model=stylemodelloader_441[0],
                  positive=redux_result[0],
                  negative=flux_guidance[0],
                  latent_image=empty_latent[0]
              )

-             # VAE Decode
+             # Decodificar VAE
              vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
              decoded = vaedecode.decode(
                  samples=sampled[0],
-                 vae=vae[0]
-             )
-
-             # Save the image in the output directory
-             saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
-             temp_filename = f"Flux_{random.randint(0, 99999)}"
-             saveimage.save_images(
-                 filename_prefix=temp_filename,
-                 images=decoded[0]
+                 vae=vaeloader_359[0]
              )

-             # Add a delay to ensure the file system updates
-             import time
-             time.sleep(0.5)
-
-             # Dynamically retrieve the correct file name
-             saved_files = [f for f in os.listdir(output_dir) if f.startswith(temp_filename)]
-             if not saved_files:
-                 raise FileNotFoundError(f"Output file not found: Expected files starting with {temp_filename}")
-
-             # Get the full path of the saved file
-             temp_path = os.path.join(output_dir, saved_files[0])
-             print(f"Image saved at: {temp_path}")
-
-             # Return the saved image for Gradio display
-             output_image = Image.open(temp_path)
-             return output_image
+             # Salvar imagem
+             temp_filename = f"Flux_{random.randint(0, 99999)}.png"
+             temp_path = os.path.join(output_dir, temp_filename)
+             Image.fromarray((decoded[0] * 255).astype("uint8")).save(temp_path)

+             return temp_path
      except Exception as e:
-         print(f"Error during generation: {str(e)}")
+         print(f"Erro ao gerar imagem: {str(e)}")
          return None

- # 8. Gradio Interface
+ # 10. Interface Gradio
  with gr.Blocks() as app:
      gr.Markdown("# FLUX Redux Image Generator")

@@ -281,7 +253,7 @@ with gr.Blocks() as app:
              generate_btn = gr.Button("Generate Image")

          with gr.Column():
-             output_image = gr.Image(label="Generated Image", type="pil")
+             output_image = gr.Image(label="Generated Image", type="filepath")

      generate_btn.click(
          fn=generate_image,
@@ -302,5 +274,4 @@ with gr.Blocks() as app:
      )

  if __name__ == "__main__":
-     # Download_models()
      app.launch(share=True)
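
Note (illustrative, not part of the commit): within the hunks shown above, the newly added get_value_at_index helper is not referenced; node outputs are still indexed directly (e.g. dualcliploader_357[0]). A minimal sketch of how such a helper is typically used, assuming ComfyUI node calls keep returning tuple-like results as they do elsewhere in this file, with the dict-with-"result" case as the fallback the helper guards against:

# Sketch only: mirrors the helper added in this commit.
from typing import Union, Sequence, Mapping, Any

def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    # Tuple-like results are indexed directly; dict-like results
    # fall back to their "result" key.
    try:
        return obj[index]
    except KeyError:
        return obj["result"][index]

# Tuple-like result, as returned by the loader nodes above:
clip = get_value_at_index(("clip_object",), 0)             # -> "clip_object"

# Dict-like result carrying a "result" key (the fallback case):
latent = get_value_at_index({"result": ("latent",)}, 0)    # -> "latent"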