1inkusFace committed
Commit a1853b2 · verified · 1 Parent(s): 2e87b57

Update app.py

Files changed (1): app.py (+64 −4)
app.py CHANGED
@@ -95,8 +95,18 @@ def infer(
     height,
     guidance_scale,
     num_inference_steps,
-    latent_file,
-    ip_scale,
+    latent_file = gr.File(), # Add latents file input
+    latent_file_2 = gr.File(), # Add latents file input
+    latent_file_3 = gr.File(), # Add latents file input
+    latent_file_4 = gr.File(), # Add latents file input
+    latent_file_5 = gr.File(), # Add latents file input
+    text_scale: float = 1.0,
+    ip_scale: float = 1.0,
+    latent_file_1_scale: float = 1.0,
+    latent_file_2_scale: float = 1.0,
+    latent_file_3_scale: float = 1.0,
+    latent_file_4_scale: float = 1.0,
+    latent_file_5_scale: float = 1.0,
     image_encoder_path,
     progress=gr.Progress(track_tqdm=True),
 ):
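The rewritten signature accepts up to five image prompts, each with its own scale, plus a text scale. The commit shows only the signature, not the body of infer(); a minimal sketch of one plausible way such per-image scales could be consumed, as a weighted blend of the encoded image prompts, follows. `encoder` is a hypothetical stand-in for the selected SigLIP/CLIP image encoder, mapping an uploaded file to a 1-D embedding; none of these names are from the commit itself.

```python
import torch

def blend_image_embeddings(encoder, files, scales):
    """Encode each uploaded image and return a scale-weighted average.

    `encoder` is a hypothetical callable (e.g. the selected SigLIP/CLIP
    model) mapping a file to a 1-D embedding; optional slots may be None.
    """
    embeds, weights = [], []
    for f, s in zip(files, scales):
        if f is None:  # skip empty optional slots
            continue
        embeds.append(encoder(f))
        weights.append(s)
    if not embeds:
        return None
    stacked = torch.stack(embeds)                   # (n, d)
    w = torch.tensor(weights, dtype=stacked.dtype)  # (n,)
    return (stacked * w[:, None]).sum(dim=0) / w.sum()
```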
@@ -194,9 +204,49 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
                 container=False,
             )
             run_button = gr.Button("Run", scale=0, variant="primary")
-        result = gr.Image(label="Result", show_label=False)
+        result = gr.Image(label="Result", show_label=False)
         with gr.Accordion("Advanced Settings", open=True):
-            latent_file = gr.File(label="Image File (optional)")
+            with gr.Row():
+                latent_file = gr.File(label="Image Prompt (Required)")
+                file_1_strength = gr.Slider(
+                    label="Img 1 %",
+                    minimum=0.0,
+                    maximum=16.0,
+                    step=0.01,
+                    value=1.0,
+                )
+                latent_file_2 = gr.File(label="Image Prompt 2 (Optional)")
+                file_2_strength = gr.Slider(
+                    label="Img 2 %",
+                    minimum=0.0,
+                    maximum=16.0,
+                    step=0.01,
+                    value=1.0,
+                )
+                latent_file_3 = gr.File(label="Image Prompt 3 (Optional)")
+                file_3_strength = gr.Slider(
+                    label="Img 3 %",
+                    minimum=0.0,
+                    maximum=16.0,
+                    step=0.01,
+                    value=1.0,
+                )
+                latent_file_4 = gr.File(label="Image Prompt 4 (Optional)")
+                file_4_strength = gr.Slider(
+                    label="Img 4 %",
+                    minimum=0.0,
+                    maximum=16.0,
+                    step=0.01,
+                    value=1.0,
+                )
+                latent_file_5 = gr.File(label="Image Prompt 5 (Optional)")
+                file_5_strength = gr.Slider(
+                    label="Img 5 %",
+                    minimum=0.0,
+                    maximum=16.0,
+                    step=0.01,
+                    value=1.0,
+                )
             image_encoder_path = gr.Dropdown(
                 ["google/siglip-so400m-patch14-384", "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"],
                 label="CLIP Model",
@@ -272,7 +322,17 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             guidance_scale,
             num_inference_steps,
             latent_file,
+            latent_file_2,
+            latent_file_3,
+            latent_file_4,
+            latent_file_5,
+            text_strength,
             ip_scale,
+            file_1_strength,
+            file_2_strength,
+            file_3_strength,
+            file_4_strength,
+            file_5_strength,
             image_encoder_path,
         ],
         outputs=[result, expanded_prompt_output],
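Gradio binds a click handler's `inputs` list to the handler's parameters positionally, so the list order in this hunk must match the infer() signature from the first hunk. A reduced, self-contained sketch of the same wiring pattern, with hypothetical components:

```python
import gradio as gr

def infer(prompt, strength):
    # inputs[0] binds to `prompt`, inputs[1] to `strength`
    return f"{prompt} @ {strength:.2f}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    strength = gr.Slider(minimum=0.0, maximum=16.0, step=0.01,
                         value=1.0, label="Strength")
    result = gr.Textbox(label="Result", show_label=False)
    run_button = gr.Button("Run", variant="primary")
    run_button.click(fn=infer, inputs=[prompt, strength], outputs=[result])
```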
 