nftnik commited on
Commit
16764be
·
verified ·
1 Parent(s): 20b9631

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +305 -184
app.py CHANGED
@@ -1,24 +1,57 @@
1
  import os
2
- import sys
3
  import random
4
- from typing import Sequence, Mapping, Any, Union
5
-
6
  import torch
7
  import gradio as gr
8
- from PIL import Image
9
  from huggingface_hub import hf_hub_download
10
-
11
- #####################################
12
- # 1. Funções auxiliares de caminho e import
13
- #####################################
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  def find_path(name: str, path: str = None) -> str:
16
- """Busca recursivamente por uma pasta/arquivo 'name' a partir de 'path'."""
17
  if path is None:
18
  path = os.getcwd()
19
  if name in os.listdir(path):
20
  path_name = os.path.join(path, name)
21
- print(f"{name} encontrado em: {path_name}")
22
  return path_name
23
  parent_directory = os.path.dirname(path)
24
  if parent_directory == path:
@@ -26,223 +59,311 @@ def find_path(name: str, path: str = None) -> str:
26
  return find_path(name, parent_directory)
27
 
28
  def add_comfyui_directory_to_sys_path() -> None:
29
- """Adiciona o diretório ComfyUI ao sys.path, caso encontrado."""
30
  comfyui_path = find_path("ComfyUI")
31
  if comfyui_path is not None and os.path.isdir(comfyui_path):
32
  sys.path.append(comfyui_path)
33
- print(f"Diretório ComfyUI adicionado ao sys.path: {comfyui_path}")
 
 
 
 
 
 
 
 
 
34
  else:
35
- print("Não foi possível encontrar o diretório ComfyUI.")
 
 
 
 
36
 
37
  def import_custom_nodes() -> None:
38
- """
39
- Inicializa os nós extras do ComfyUI, sem importar o servidor.
40
- """
41
- from nodes import init_extra_nodes
 
 
 
42
  init_extra_nodes()
43
 
44
- #####################################
45
- # 2. Configurando o ambiente
46
- #####################################
47
-
48
- add_comfyui_directory_to_sys_path()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  import_custom_nodes()
50
 
51
- #####################################
52
- # 3. Importando nós do ComfyUI
53
- #####################################
54
- from comfy import model_management
55
- from nodes import (
56
- NODE_CLASS_MAPPINGS,
57
- DualCLIPLoader,
58
- CLIPVisionLoader,
59
- StyleModelLoader,
60
- VAELoader,
61
- CLIPTextEncode,
62
- LoadImage,
63
- EmptyLatentImage,
64
- VAEDecode
65
- )
66
-
67
- #####################################
68
- # 4. Download de modelos (ajuste conforme sua necessidade)
69
- #####################################
70
-
71
- # Criando pastas de modelos, se necessário
72
- os.makedirs("models/text_encoders", exist_ok=True)
73
- os.makedirs("models/style_models", exist_ok=True)
74
- os.makedirs("models/diffusion_models", exist_ok=True)
75
- os.makedirs("models/vae", exist_ok=True)
76
- os.makedirs("models/clip_vision", exist_ok=True)
77
 
78
- # Baixando os modelos necessários
79
- try:
80
- print("Baixando modelos...")
81
- hf_hub_download(repo_id="black-forest-labs/FLUX.1-Redux-dev",
82
- filename="flux1-redux-dev.safetensors",
83
- local_dir="models/style_models")
84
- hf_hub_download(repo_id="comfyanonymous/flux_text_encoders",
85
- filename="t5xxl_fp16.safetensors",
86
- local_dir="models/text_encoders")
87
- hf_hub_download(repo_id="zer0int/CLIP-GmP-ViT-L-14",
88
- filename="ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors",
89
- local_dir="models/text_encoders")
90
- hf_hub_download(repo_id="black-forest-labs/FLUX.1-dev",
91
- filename="ae.safetensors",
92
- local_dir="models/vae")
93
- hf_hub_download(repo_id="black-forest-labs/FLUX.1-dev",
94
- filename="flux1-dev.safetensors",
95
- local_dir="models/diffusion_models")
96
- hf_hub_download(repo_id="google/siglip-so400m-patch14-384",
97
- filename="model.safetensors",
98
- local_dir="models/clip_vision")
99
- except Exception as e:
100
- print("Erro ao baixar modelos:", e)
101
-
102
- #####################################
103
- # 5. Carregando os modelos do ComfyUI
104
- #####################################
105
-
106
- # Inicializando nós e modelos
107
- dualcliploader = DualCLIPLoader()
108
- clip_model = dualcliploader.load_clip(
109
  clip_name1="t5xxl_fp16.safetensors",
110
  clip_name2="ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors",
111
  type="flux"
112
  )
113
 
114
- clipvisionloader = CLIPVisionLoader()
115
- clip_vision_model = clipvisionloader.load_clip(
 
 
 
 
 
 
 
116
  clip_name="model.safetensors"
117
  )
118
 
119
- stylemodelloader = StyleModelLoader()
120
- style_model = stylemodelloader.load_style_model(
 
121
  style_model_name="flux1-redux-dev.safetensors"
122
  )
123
 
124
- vaeloader = VAELoader()
125
- vae_model = vaeloader.load_vae(
126
- vae_name="ae.safetensors"
127
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
  model_management.load_models_gpu([
130
- clip_model[0], clip_vision_model[0], style_model[0], vae_model[0]
131
  ])
132
 
133
- #####################################
134
- # 6. Função de geração de imagem
135
- #####################################
136
-
137
- def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
138
- """Retorna o valor no índice especificado."""
139
- try:
140
- return obj[index]
141
- except KeyError:
142
- return obj["result"][index]
143
-
144
- def generate_image(
145
- prompt: str,
146
- input_image_path: str,
147
- guidance: float,
148
- downsampling_factor: float,
149
- weight: float,
150
- seed: int,
151
- width: int,
152
- height: int,
153
- steps: int,
154
- progress=gr.Progress(track_tqdm=True)
155
- ):
156
- """
157
- Gera uma imagem usando os nós do ComfyUI.
158
- """
159
- try:
160
- # Garantindo repetibilidade do seed
161
- torch.manual_seed(seed)
162
- random.seed(seed)
163
-
164
- # Encode do texto
165
- cliptextencode = CLIPTextEncode()
166
- encoded_text = cliptextencode.encode(
167
  text=prompt,
168
- clip=get_value_at_index(clip_model, 0)
169
  )
170
-
171
- # Carregar imagem de entrada
172
- loadimage = LoadImage()
173
- loaded_image = loadimage.load_image(image=input_image_path)
174
-
175
- # Guidance
176
- fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
177
- flux_guided = fluxguidance.append(
 
 
 
 
 
 
 
 
 
178
  guidance=guidance,
179
- conditioning=get_value_at_index(encoded_text, 0)
180
  )
181
-
182
- # Aplicar estilo
183
- reduxadvanced = NODE_CLASS_MAPPINGS["ReduxAdvanced"]()
184
- styled_image = reduxadvanced.apply_stylemodel(
185
- downsampling_factor=downsampling_factor,
186
- downsampling_function="area",
187
- mode="keep aspect ratio",
188
- weight=weight,
189
- conditioning=get_value_at_index(flux_guided, 0),
190
- style_model=get_value_at_index(style_model, 0),
191
- clip_vision=get_value_at_index(clip_vision_model, 0),
192
- image=get_value_at_index(loaded_image, 0)
193
  )
194
-
195
- # Gerar imagem final (decodificar do VAE)
196
- vaedecode = VAEDecode()
197
- decoded_image = vaedecode.decode(
198
- samples=get_value_at_index(styled_image, 0),
199
- vae=get_value_at_index(vae_model, 0)
200
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
201
 
202
- # Salvar a imagem
203
- output_dir = "output"
204
- os.makedirs(output_dir, exist_ok=True)
205
- output_path = os.path.join(output_dir, f"generated_{random.randint(1, 99999)}.png")
206
-
207
- Image.fromarray((decoded_image[0] * 255).astype("uint8")).save(output_path)
208
- return output_path
209
- except Exception as e:
210
- print("Erro ao gerar imagem:", e)
211
- return None
212
 
213
- #####################################
214
- # 7. Interface Gradio
215
- #####################################
216
 
217
  with gr.Blocks() as app:
218
- gr.Markdown("# FLUX Redux Image Generator")
 
219
  with gr.Row():
220
- with gr.Column():
221
- prompt_input = gr.Textbox(label="Prompt", placeholder="Escreva seu prompt...", lines=3)
222
- input_image = gr.Image(label="Imagem de Entrada", type="filepath")
223
- guidance_slider = gr.Slider(minimum=0, maximum=20, step=0.1, value=3.5, label="Guidance")
224
- downsampling_factor_slider = gr.Slider(minimum=1, maximum=8, step=1, value=3, label="Downsampling Factor")
225
- weight_slider = gr.Slider(minimum=0, maximum=2, step=0.1, value=1.0, label="Peso do Estilo")
226
- seed_input = gr.Number(label="Seed", value=random.randint(1, 2**32), precision=0)
227
- width_input = gr.Number(label="Largura", value=512, precision=0)
228
- height_input = gr.Number(label="Altura", value=512, precision=0)
229
- steps_input = gr.Number(label="Passos", value=50, precision=0)
230
- generate_btn = gr.Button("Gerar Imagem")
231
-
232
- with gr.Column():
233
- output_image = gr.Image(label="Imagem Gerada")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
234
 
235
  generate_btn.click(
236
  fn=generate_image,
237
  inputs=[
238
- prompt_input, input_image, guidance_slider,
239
- downsampling_factor_slider, weight_slider,
240
- seed_input, width_input, height_input, steps_input
 
 
 
 
 
 
 
 
241
  ],
242
  outputs=[output_image]
243
  )
244
 
245
  if __name__ == "__main__":
246
- app.launch()
247
-
248
- #nftnik
 
1
  import os
 
2
  import random
3
+ import sys
 
4
  import torch
5
  import gradio as gr
6
+ from pathlib import Path
7
  from huggingface_hub import hf_hub_download
8
+ import spaces
9
+ from typing import Union, Sequence, Mapping, Any
10
+ from comfy import model_management
11
+ from nodes import NODE_CLASS_MAPPINGS
12
+
13
+ # 1. Configuração de Caminhos e Imports
14
+ current_dir = os.path.dirname(os.path.abspath(__file__))
15
+ comfyui_path = os.path.join(current_dir, "ComfyUI")
16
+ sys.path.append(comfyui_path)
17
+
18
+ # 2. Imports do ComfyUI
19
+ import folder_paths
20
+ from nodes import init_extra_nodes
21
+
22
+ # 3. Configuração de Diretórios
23
+ BASE_DIR = os.path.dirname(os.path.realpath(__file__))
24
+ output_dir = os.path.join(BASE_DIR, "output")
25
+ models_dir = os.path.join(BASE_DIR, "models")
26
+ os.makedirs(output_dir, exist_ok=True)
27
+ os.makedirs(models_dir, exist_ok=True)
28
+ folder_paths.set_output_directory(output_dir)
29
+
30
+ # 4. Diagnóstico CUDA
31
+ print("Python version:", sys.version)
32
+ print("Torch version:", torch.__version__)
33
+ print("CUDA disponível:", torch.cuda.is_available())
34
+ print("Quantidade de GPUs:", torch.cuda.device_count())
35
+ if torch.cuda.is_available():
36
+ print("GPU atual:", torch.cuda.get_device_name(0))
37
+
38
+ # 5. Inicialização do ComfyUI
39
+ print("Inicializando ComfyUI...")
40
+ init_extra_nodes()
41
+
42
+ # 6. Helper Functions
43
+ def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
44
+ try:
45
+ return obj[index]
46
+ except KeyError:
47
+ return obj["result"][index]
48
 
49
  def find_path(name: str, path: str = None) -> str:
 
50
  if path is None:
51
  path = os.getcwd()
52
  if name in os.listdir(path):
53
  path_name = os.path.join(path, name)
54
+ print(f"{name} found: {path_name}")
55
  return path_name
56
  parent_directory = os.path.dirname(path)
57
  if parent_directory == path:
 
59
  return find_path(name, parent_directory)
60
 
61
  def add_comfyui_directory_to_sys_path() -> None:
 
62
  comfyui_path = find_path("ComfyUI")
63
  if comfyui_path is not None and os.path.isdir(comfyui_path):
64
  sys.path.append(comfyui_path)
65
+ print(f"'{comfyui_path}' added to sys.path")
66
+
67
+ def add_extra_model_paths() -> None:
68
+ try:
69
+ from main import load_extra_path_config
70
+ except ImportError:
71
+ from utils.extra_config import load_extra_path_config
72
+ extra_model_paths = find_path("extra_model_paths.yaml")
73
+ if extra_model_paths is not None:
74
+ load_extra_path_config(extra_model_paths)
75
  else:
76
+ print("Could not find the extra_model_paths config file.")
77
+
78
+ # 7. Inicialização de caminhos
79
+ add_comfyui_directory_to_sys_path()
80
+ add_extra_model_paths()
81
 
82
  def import_custom_nodes() -> None:
83
+ import asyncio
84
+ import execution
85
+ import server
86
+ loop = asyncio.new_event_loop()
87
+ asyncio.set_event_loop(loop)
88
+ server_instance = server.PromptServer(loop)
89
+ execution.PromptQueue(server_instance)
90
  init_extra_nodes()
91
 
92
+ # 8. Download de Modelos
93
+ def download_models():
94
+ print("Baixando modelos...")
95
+ models = [
96
+ ("black-forest-labs/FLUX.1-Redux-dev", "flux1-redux-dev.safetensors", "style_models"),
97
+ ("comfyanonymous/flux_text_encoders", "t5xxl_fp16.safetensors", "text_encoders"),
98
+ ("zer0int/CLIP-GmP-ViT-L-14", "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors", "text_encoders"),
99
+ ("black-forest-labs/FLUX.1-dev", "ae.safetensors", "vae"),
100
+ ("black-forest-labs/FLUX.1-dev", "flux1-dev.safetensors", "diffusion_models"),
101
+ ("google/siglip-so400m-patch14-384", "model.safetensors", "clip_vision")
102
+ ]
103
+
104
+ for repo_id, filename, model_type in models:
105
+ try:
106
+ model_dir = os.path.join(models_dir, model_type)
107
+ os.makedirs(model_dir, exist_ok=True)
108
+ print(f"Baixando {filename} de {repo_id}...")
109
+ hf_hub_download(repo_id=repo_id, filename=filename, local_dir=model_dir)
110
+ # Adicionar o diretório ao folder_paths
111
+ folder_paths.add_model_folder_path(model_type, model_dir)
112
+ except Exception as e:
113
+ print(f"Erro ao baixar {filename} de {repo_id}: {str(e)}")
114
+ continue
115
+
116
+ # 9. Download e Inicialização dos Modelos
117
+ print("Baixando modelos...")
118
+ download_models()
119
+
120
+ print("Inicializando modelos...")
121
  import_custom_nodes()
122
 
123
+ # Global variables for preloaded models and constants
124
+ intconstant = NODE_CLASS_MAPPINGS["INTConstant"]()
125
+ CONST_1024 = intconstant.get_value(value=1024)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
126
 
127
+ # Load CLIP
128
+ dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
129
+ CLIP_MODEL = dualcliploader.load_clip(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
  clip_name1="t5xxl_fp16.safetensors",
131
  clip_name2="ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors",
132
  type="flux"
133
  )
134
 
135
+ # Load VAE
136
+ vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
137
+ VAE_MODEL = vaeloader.load_vae(
138
+ vae_name="ae.safetensors"
139
+ )
140
+
141
+ # Load CLIP Vision
142
+ clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
143
+ CLIP_VISION_MODEL = clipvisionloader.load_clip(
144
  clip_name="model.safetensors"
145
  )
146
 
147
+ # Load Style Model
148
+ stylemodelloader = NODE_CLASS_MAPPINGS["StyleModelLoader"]()
149
+ STYLE_MODEL = stylemodelloader.load_style_model(
150
  style_model_name="flux1-redux-dev.safetensors"
151
  )
152
 
153
+ # Initialize samplers
154
+ ksamplerselect = NODE_CLASS_MAPPINGS["KSamplerSelect"]()
155
+ SAMPLER = ksamplerselect.get_sampler(sampler_name="euler")
156
+
157
+ # Initialize other nodes
158
+ cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
159
+ loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
160
+ vaeencode = NODE_CLASS_MAPPINGS["VAEEncode"]()
161
+ fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
162
+ instructpixtopixconditioning = NODE_CLASS_MAPPINGS["InstructPixToPixConditioning"]()
163
+ clipvisionencode = NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
164
+ stylemodelapplyadvanced = NODE_CLASS_MAPPINGS["StyleModelApplyAdvanced"]()
165
+ emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
166
+ basicguider = NODE_CLASS_MAPPINGS["BasicGuider"]()
167
+ basicscheduler = NODE_CLASS_MAPPINGS["BasicScheduler"]()
168
+ randomnoise = NODE_CLASS_MAPPINGS["RandomNoise"]()
169
+ samplerCustomAdvanced = NODE_CLASS_MAPPINGS["SamplerCustomAdvanced"]()
170
+ vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
171
+ saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
172
+ getimagesizeandcount = NODE_CLASS_MAPPINGS["GetImageSizeAndCount"]()
173
+ depthanything_v2 = NODE_CLASS_MAPPINGS["DepthAnything_V2"]()
174
+ cr_text = NODE_CLASS_MAPPINGS["CR Text"]()
175
+
176
+ model_loaders = [CLIP_MODEL, VAE_MODEL, CLIP_VISION_MODEL, STYLE_MODEL]
177
 
178
  model_management.load_models_gpu([
179
+ loader[0].patcher if hasattr(loader[0], 'patcher') else loader[0] for loader in model_loaders
180
  ])
181
 
182
+ @spaces.GPU
183
+ def generate_image(prompt, input_image, lora_weight, guidance, downsampling_factor, weight, seed, width, height, batch_size, steps, progress=gr.Progress(track_tqdm=True)) -> str:
184
+ with torch.inference_mode():
185
+ # Set up CLIP
186
+ clip_switch = cr_text.text_multiline(text="Flux_BFL_Depth_Redux")
187
+
188
+ # Encode text
189
+ text_encoded = cliptextencode.encode(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
190
  text=prompt,
191
+ clip=get_value_at_index(CLIP_MODEL, 0),
192
  )
193
+
194
+ # Process input image
195
+ loaded_image = loadimage.load_image(image=input_image)
196
+
197
+ # Get image size
198
+ size_info = getimagesizeandcount.getsize(
199
+ image=get_value_at_index(loaded_image, 0)
200
+ )
201
+
202
+ # Encode VAE
203
+ vae_encoded = vaeencode.encode(
204
+ pixels=get_value_at_index(size_info, 0),
205
+ vae=get_value_at_index(VAE_MODEL, 0),
206
+ )
207
+
208
+ # Apply Flux guidance
209
+ flux_guided = fluxguidance.append(
210
  guidance=guidance,
211
+ conditioning=get_value_at_index(text_encoded, 0),
212
  )
213
+
214
+ # Set up empty latent
215
+ empty_latent = emptylatentimage.generate(
216
+ width=width,
217
+ height=height,
218
+ batch_size=batch_size
 
 
 
 
 
 
219
  )
220
+
221
+ # Set up guidance
222
+ guided = basicguider.get_guider(
223
+ model=get_value_at_index(unet_model, 0),
224
+ conditioning=get_value_at_index(loaded_image, 0)
 
225
  )
226
+
227
+ # Set up scheduler
228
+ schedule = basicscheduler.get_sigmas(
229
+ scheduler="simple",
230
+ steps=steps,
231
+ denoise=1,
232
+ model=get_value_at_index(unet_model, 0),
233
+ )
234
+
235
+ # Generate random noise
236
+ noise = randomnoise.get_noise(noise_seed=seed)
237
+
238
+ # Sample
239
+ sampled = samplerCustomAdvanced.sample(
240
+ noise=get_value_at_index(noise, 0),
241
+ guider=get_value_at_index(guided, 0),
242
+ sampler=get_value_at_index(SAMPLER, 0),
243
+ sigmas=get_value_at_index(schedule, 0),
244
+ latent_image=get_value_at_index(empty_latent, 0)
245
+ )
246
+
247
+ # Decode VAE
248
+ decoded = vaedecode.decode(
249
+ samples=get_value_at_index(sampled, 0),
250
+ vae=get_value_at_index(VAE_MODEL, 0),
251
+ )
252
+
253
+ # Save image
254
+ saved = saveimage.save_images(
255
+ filename_prefix=get_value_at_index(clip_switch, 0),
256
+ images=get_value_at_index(decoded, 0),
257
+ )
258
+
259
+ saved_path = f"output/{saved['ui']['images'][0]['filename']}"
260
+
261
+ return saved_path
262
 
263
+ # Create Gradio interface
264
+ examples = [
265
+ ["", "mona.png", 0.5, 3.5, 3, 1.0, random.randint(1, 2**64), 1024, 1024, 1, 20],
266
+ ["a woman looking at a house catching fire on the background", "disaster Girl.png", 0.6, 3.5, 3, 1.0, random.randint(1, 2**64), 1024, 1024, 1, 20],
267
+ ["Istanbul aerial, dramatic photography", "Natasha.png", 0.5, 3.5, 3, 1.0, random.randint(1, 2**64), 1024, 1024, 1, 20],
268
+ ]
 
 
 
 
269
 
270
+ output_image = gr.Image(label="Generated image")
 
 
271
 
272
  with gr.Blocks() as app:
273
+ gr.Markdown("# FLUX Redux Image generator")
274
+
275
  with gr.Row():
276
+ with gr.Column():
277
+ prompt_input = gr.Textbox(
278
+ label="Prompt",
279
+ placeholder="Enter your prompt here...",
280
+ lines=5
281
+ )
282
+
283
+ with gr.Row():
284
+ with gr.Column():
285
+ lora_weight = gr.Slider(
286
+ minimum=0,
287
+ maximum=2,
288
+ step=0.1,
289
+ value=0.6,
290
+ label="LoRA Weight"
291
+ )
292
+ guidance = gr.Slider(
293
+ minimum=0,
294
+ maximum=20,
295
+ step=0.1,
296
+ value=3.5,
297
+ label="Guidance"
298
+ )
299
+ downsampling_factor = gr.Slider(
300
+ minimum=0,
301
+ maximum=8,
302
+ step=1,
303
+ value=3,
304
+ label="Downsampling factor"
305
+ )
306
+ weight = gr.Slider(
307
+ minimum=0,
308
+ maximum=2,
309
+ step=0.1,
310
+ value=1.0,
311
+ label="Model weight"
312
+ )
313
+ seed = gr.Number(
314
+ value=random.randint(1, 2**64),
315
+ label="seed",
316
+ precision=0
317
+ )
318
+ width = gr.Number(
319
+ value=1024,
320
+ label="width",
321
+ precision=0
322
+ )
323
+ height = gr.Number(
324
+ value=1024,
325
+ label="height",
326
+ precision=0
327
+ )
328
+ batch_size = gr.Number(
329
+ value=1,
330
+ label="batch size",
331
+ precision=0
332
+ )
333
+ steps = gr.Number(
334
+ value=20,
335
+ label="steps",
336
+ precision=0
337
+ )
338
+
339
+ with gr.Column():
340
+ input_image = gr.Image(
341
+ label="Input Image",
342
+ type="filepath"
343
+ )
344
+
345
+ generate_btn = gr.Button("Generate image")
346
+
347
+ with gr.Column():
348
+ output_image.render()
349
 
350
  generate_btn.click(
351
  fn=generate_image,
352
  inputs=[
353
+ prompt_input,
354
+ input_image,
355
+ lora_weight,
356
+ guidance,
357
+ downsampling_factor,
358
+ weight,
359
+ seed,
360
+ width,
361
+ height,
362
+ batch_size,
363
+ steps
364
  ],
365
  outputs=[output_image]
366
  )
367
 
368
  if __name__ == "__main__":
369
+ app.launch(share=True)