nftnik committed · verified
Commit c8c745b · Parent(s): 66c3c3a

Update gradio_app.py

Files changed (1): gradio_app.py (+42 -197)
gradio_app.py CHANGED
@@ -8,8 +8,6 @@ import gradio as gr
 from PIL import Image
 from huggingface_hub import hf_hub_download
 
-import spaces  # If running on Hugging Face Spaces. If not, it can be removed.
-
 #####################################
 # 1. Path and import helper functions
 #####################################
@@ -36,45 +34,18 @@ def add_comfyui_directory_to_sys_path() -> None:
     else:
         print("Não foi possível encontrar o diretório ComfyUI.")
 
-def add_extra_model_paths() -> None:
-    """
-    Loads extra model path settings if an
-    'extra_model_paths.yaml' file exists.
-    """
-    try:
-        from main import load_extra_path_config
-    except ImportError:
-        # Depending on the ComfyUI version, it may live in 'utils.extra_config'
-        from utils.extra_config import load_extra_path_config
-
-    extra_model_paths = find_path("extra_model_paths.yaml")
-    if extra_model_paths is not None:
-        load_extra_path_config(extra_model_paths)
-    else:
-        print("Arquivo extra_model_paths.yaml não foi encontrado.")
-
 def import_custom_nodes() -> None:
     """
-    Runs the initialization of the extra nodes and the ComfyUI server (if needed),
-    similar to what happens in the second script.
+    Initializes ComfyUI's extra nodes without importing the server.
     """
-    import asyncio
-    import execution
     from nodes import init_extra_nodes
-    import server
-
-    loop = asyncio.new_event_loop()
-    asyncio.set_event_loop(loop)
-    server_instance = server.PromptServer(loop)
-    execution.PromptQueue(server_instance)
     init_extra_nodes()
 
 #####################################
-# 2. Setting up the ComfyUI environment
+# 2. Configuring the environment
 #####################################
 
 add_comfyui_directory_to_sys_path()
-add_extra_model_paths()
 import_custom_nodes()
 
 #####################################
@@ -97,47 +68,42 @@ from nodes import (
 # 4. Model downloads (adjust to your needs)
 #####################################
 
-# Example downloads (adjust to your models):
+# Creating the model folders, if needed
 os.makedirs("models/text_encoders", exist_ok=True)
 os.makedirs("models/style_models", exist_ok=True)
 os.makedirs("models/diffusion_models", exist_ok=True)
 os.makedirs("models/vae", exist_ok=True)
 os.makedirs("models/clip_vision", exist_ok=True)
 
+# Downloading the required models
 try:
-    print("Baixando modelo Style (flux1-redux-dev.safetensors)...")
+    print("Baixando modelos...")
     hf_hub_download(repo_id="black-forest-labs/FLUX.1-Redux-dev",
                     filename="flux1-redux-dev.safetensors",
                     local_dir="models/style_models")
-    print("Baixando T5 (t5xxl_fp16.safetensors)...")
     hf_hub_download(repo_id="comfyanonymous/flux_text_encoders",
                     filename="t5xxl_fp16.safetensors",
                     local_dir="models/text_encoders")
-
-    print("Baixando CLIP L (ViT-L-14) ...")
     hf_hub_download(repo_id="zer0int/CLIP-GmP-ViT-L-14",
                     filename="ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors",
                     local_dir="models/text_encoders")
-    print("Baixando VAE (ae.safetensors)...")
     hf_hub_download(repo_id="black-forest-labs/FLUX.1-dev",
                     filename="ae.safetensors",
                     local_dir="models/vae")
-    print("Baixando flux1-dev.safetensors (modelo difusão)...")
     hf_hub_download(repo_id="black-forest-labs/FLUX.1-dev",
                     filename="flux1-dev.safetensors",
                     local_dir="models/diffusion_models")
-    print("Baixando CLIP Vision (model.safetensors)...")
     hf_hub_download(repo_id="google/siglip-so400m-patch14-384",
                     filename="model.safetensors",
                     local_dir="models/clip_vision")
 except Exception as e:
-    print("Algum download falhou:", e)
+    print("Erro ao baixar modelos:", e)
 
 #####################################
-# 5. Loading models via ComfyUI
+# 5. Loading the ComfyUI models
 #####################################
 
-# Loading CLIP (DualCLIPLoader)
+# Initializing nodes and models
 dualcliploader = DualCLIPLoader()
 clip_model = dualcliploader.load_clip(
     clip_name1="t5xxl_fp16.safetensors",
@@ -145,90 +111,77 @@ clip_model = dualcliploader.load_clip(
     type="flux"
 )
 
-# Loading CLIP Vision
 clipvisionloader = CLIPVisionLoader()
 clip_vision_model = clipvisionloader.load_clip(
     clip_name="model.safetensors"
 )
 
-# Loading the Style Model
 stylemodelloader = StyleModelLoader()
 style_model = stylemodelloader.load_style_model(
     style_model_name="flux1-redux-dev.safetensors"
 )
 
-# Loading the VAE
 vaeloader = VAELoader()
 vae_model = vaeloader.load_vae(
     vae_name="ae.safetensors"
 )
 
-# (Optional) If you have a UNet model, use UNETLoader, etc.
-
-# Optional: load onto the GPU
 model_management.load_models_gpu([
-    loader[0] for loader in [clip_model, clip_vision_model, style_model, vae_model]
+    clip_model[0], clip_vision_model[0], style_model[0], vae_model[0]
 ])
 
 #####################################
-# 6. Helper functions and placeholders
+# 6. Image generation function
 #####################################
 
 def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
-    """Returns the 'index' entry of an object that can be a dict or a list."""
+    """Returns the value at the specified index."""
     try:
         return obj[index]
     except KeyError:
         return obj["result"][index]
 
-#####################################
-# 7. Defining the simplified workflow
-#####################################
-
-@spaces.GPU  # If running on Hugging Face Spaces. Otherwise, remove.
 def generate_image(
     prompt: str,
     input_image_path: str,
-    lora_weight: float,
     guidance: float,
     downsampling_factor: float,
     weight: float,
     seed: int,
     width: int,
     height: int,
-    batch_size: int,
     steps: int,
     progress=gr.Progress(track_tqdm=True)
 ):
     """
-    Generates an image using a simplified flow, similar to the first script.
+    Generates an image using the ComfyUI nodes.
     """
     try:
         # Ensuring seed reproducibility
         torch.manual_seed(seed)
         random.seed(seed)
 
-        # 1) Encode the text
+        # Encode the text
         cliptextencode = CLIPTextEncode()
         encoded_text = cliptextencode.encode(
             text=prompt,
             clip=get_value_at_index(clip_model, 0)
         )
 
-        # 2) Load the input image
+        # Load the input image
         loadimage = LoadImage()
         loaded_image = loadimage.load_image(image=input_image_path)
 
-        # 3) Flux Guidance (if present)
+        # Guidance
         fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
         flux_guided = fluxguidance.append(
             guidance=guidance,
             conditioning=get_value_at_index(encoded_text, 0)
         )
 
-        # 4) Redux Advanced (apply the style model)
+        # Apply the style
         reduxadvanced = NODE_CLASS_MAPPINGS["ReduxAdvanced"]()
-        redux_result = reduxadvanced.apply_stylemodel(
+        styled_image = reduxadvanced.apply_stylemodel(
            downsampling_factor=downsampling_factor,
            downsampling_function="area",
            mode="keep aspect ratio",
@@ -239,163 +192,55 @@ def generate_image(
             image=get_value_at_index(loaded_image, 0)
         )
 
-        # 5) Empty Latent
-        emptylatent = EmptyLatentImage()
-        empty_latent = emptylatent.generate(
-            width=width,
-            height=height,
-            batch_size=batch_size
-        )
-
-        # 6) KSampler (current ComfyUI has "KSamplerSelect" or "KSampler")
-        ksampler = NODE_CLASS_MAPPINGS["KSampler"]()
-        sampled = ksampler.sample(
-            seed=seed,
-            steps=steps,
-            cfg=1,  # example: CFG = 1
-            sampler_name="euler",
-            scheduler="simple",
-            denoise=1,
-            model=get_value_at_index(style_model, 0),  # Use the style model as the UNet? (depends on the config)
-            positive=get_value_at_index(redux_result, 0),
-            negative=get_value_at_index(flux_guided, 0),
-            latent_image=get_value_at_index(empty_latent, 0)
-        )
-
-        # 7) Decode the VAE
+        # Generate the final image (decode from the VAE)
         vaedecode = VAEDecode()
-        decoded = vaedecode.decode(
-            samples=get_value_at_index(sampled, 0),
+        decoded_image = vaedecode.decode(
+            samples=get_value_at_index(styled_image, 0),
             vae=get_value_at_index(vae_model, 0)
         )
 
-        # 8) Save the image
+        # Save the image
         output_dir = "output"
         os.makedirs(output_dir, exist_ok=True)
-        temp_filename = f"Flux_{random.randint(0, 99999)}.png"
-        temp_path = os.path.join(output_dir, temp_filename)
-
-        # In ComfyUI, 'decoded[0]' may be a normalized [C,H,W] tensor
-        # or something in [N,C,H,W] format. We need to convert it to PIL:
-        # If it is a batch, take the first item. Adjust if you want a larger batch.
-        image_data = get_value_at_index(decoded, 0)
-        # Usually, if it is "float [0,1]" in C,H,W:
-        # we need to move it to the CPU and convert it to numpy
-        if isinstance(image_data, torch.Tensor):
-            image_data = image_data.cpu().numpy()
-
-        # If the image is in [C,H,W], transpose to [H,W,C] and scale to 0..255
-        if len(image_data.shape) == 3:
-            image_data = image_data.transpose(1, 2, 0)
-        image_data = (image_data * 255).clip(0, 255).astype("uint8")
-
-        pil_image = Image.fromarray(image_data)
-        pil_image.save(temp_path)
-
-        return temp_path
+        output_path = os.path.join(output_dir, f"generated_{random.randint(1, 99999)}.png")
+
+        Image.fromarray((decoded_image[0] * 255).astype("uint8")).save(output_path)
+        return output_path
     except Exception as e:
-        print(f"Erro ao gerar imagem: {str(e)}")
+        print("Erro ao gerar imagem:", e)
         return None
 
 #####################################
-# 8. Gradio interface (similar to the first snippet)
+# 7. Gradio interface
 #####################################
 
 with gr.Blocks() as app:
-    gr.Markdown("# FLUX Redux Image Generator (Simplificado)")
-
+    gr.Markdown("# FLUX Redux Image Generator")
     with gr.Row():
         with gr.Column():
-            prompt_input = gr.Textbox(
-                label="Prompt",
-                placeholder="Escreva seu prompt...",
-                lines=5
-            )
-            input_image = gr.Image(
-                label="Imagem de Entrada",
-                type="filepath"
-            )
-
-            with gr.Row():
-                with gr.Column():
-                    lora_weight = gr.Slider(
-                        minimum=0,
-                        maximum=2,
-                        step=0.1,
-                        value=0.6,
-                        label="LoRA Weight (não usado nesse fluxo)"
-                    )
-                    guidance = gr.Slider(
-                        minimum=0,
-                        maximum=20,
-                        step=0.1,
-                        value=3.5,
-                        label="Guidance"
-                    )
-                    downsampling_factor = gr.Slider(
-                        minimum=1,
-                        maximum=8,
-                        step=1,
-                        value=3,
-                        label="Downsampling Factor"
-                    )
-                    weight = gr.Slider(
-                        minimum=0,
-                        maximum=2,
-                        step=0.1,
-                        value=1.0,
-                        label="Redux Model Weight"
-                    )
-                with gr.Column():
-                    seed = gr.Number(
-                        value=random.randint(1, 2**64),
-                        label="Seed",
-                        precision=0
-                    )
-                    width = gr.Number(
-                        value=512,
-                        label="Width",
-                        precision=0
-                    )
-                    height = gr.Number(
-                        value=512,
-                        label="Height",
-                        precision=0
-                    )
-                    batch_size = gr.Number(
-                        value=1,
-                        label="Batch Size",
-                        precision=0
-                    )
-                    steps = gr.Number(
-                        value=20,
-                        label="Steps",
-                        precision=0
-                    )
-
-            generate_btn = gr.Button("Generate Image")
+            prompt_input = gr.Textbox(label="Prompt", placeholder="Escreva seu prompt...", lines=3)
+            input_image = gr.Image(label="Imagem de Entrada", type="filepath")
+            guidance_slider = gr.Slider(minimum=0, maximum=20, step=0.1, value=3.5, label="Guidance")
+            downsampling_factor_slider = gr.Slider(minimum=1, maximum=8, step=1, value=3, label="Downsampling Factor")
+            weight_slider = gr.Slider(minimum=0, maximum=2, step=0.1, value=1.0, label="Peso do Estilo")
+            seed_input = gr.Number(label="Seed", value=random.randint(1, 2**32), precision=0)
+            width_input = gr.Number(label="Largura", value=512, precision=0)
+            height_input = gr.Number(label="Altura", value=512, precision=0)
+            steps_input = gr.Number(label="Passos", value=50, precision=0)
+            generate_btn = gr.Button("Gerar Imagem")
 
         with gr.Column():
-            output_image = gr.Image(label="Generated Image", type="filepath")
+            output_image = gr.Image(label="Imagem Gerada")
 
     generate_btn.click(
         fn=generate_image,
         inputs=[
-            prompt_input,
-            input_image,
-            lora_weight,
-            guidance,
-            downsampling_factor,
-            weight,
-            seed,
-            width,
-            height,
-            batch_size,
-            steps
+            prompt_input, input_image, guidance_slider,
+            downsampling_factor_slider, weight_slider,
+            seed_input, width_input, height_input, steps_input
         ],
         outputs=[output_image]
    )
 
 if __name__ == "__main__":
-    # You can use app.launch(share=True) if you want to share it via a link.
     app.launch()
 
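Note on the new saving step: in ComfyUI, VAEDecode.decode typically returns a batched torch tensor with float values in [0, 1], so calling .astype(...) on decoded_image[0] directly, as the added line does, may fail until the data is moved to NumPy, which is what the removed code handled. Below is a minimal sketch of that conversion, reusing the removed implementation's logic; the helper name save_decoded_image is hypothetical, the [N, H, W, C] layout is an assumption, and only decoded_image and output_path are taken from the updated code.

import os
import random

import torch
from PIL import Image


def save_decoded_image(decoded_image, output_dir="output"):
    """Sketch: convert a ComfyUI VAEDecode result to a PIL image and save it."""
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, f"generated_{random.randint(1, 99999)}.png")

    image_data = decoded_image[0]              # first element of the node's result tuple
    if isinstance(image_data, torch.Tensor):
        image_data = image_data.cpu().numpy()  # move to CPU / NumPy before converting to uint8
    if image_data.ndim == 4:                   # assumed [N, H, W, C] batch: keep the first image
        image_data = image_data[0]

    image_data = (image_data * 255).clip(0, 255).astype("uint8")
    Image.fromarray(image_data).save(output_path)
    return output_path

The removed version also transposed [C, H, W] tensors to [H, W, C] before Image.fromarray; keeping some equivalent conversion avoids passing a raw torch tensor to PIL.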