aiqtech committed
Commit 9c05abf · verified · 1 Parent(s): 9eb75cd

Update app.py

Files changed (1): app.py (+13 -21)
app.py CHANGED
@@ -97,8 +97,6 @@ def add_extra_model_paths() -> None:
 add_comfyui_directory_to_sys_path()
 add_extra_model_paths()
 
-
-
 def import_custom_nodes() -> None:
     import asyncio
     import execution
@@ -195,8 +193,9 @@ getimagesizeandcount = NODE_CLASS_MAPPINGS["GetImageSizeAndCount"]()
 depthanything_v2 = NODE_CLASS_MAPPINGS["DepthAnything_V2"]()
 imageresize = NODE_CLASS_MAPPINGS["ImageResize+"]()
 
+
 @spaces.GPU
-def generate_image(prompt, structure_image, style_image, depth_strength=15, style_strength=0.5, progress=gr.Progress(track_tqdm=True)) -> str:
+def generate_image(structure_image, style_image, depth_strength=15, style_strength=0.5, progress=gr.Progress(track_tqdm=True)) -> str:
     """Main generation function that processes inputs and returns the path to the generated image."""
     with torch.inference_mode():
         # Set up CLIP
@@ -206,9 +205,9 @@ def generate_image(prompt, structure_image, style_image, depth_strength=15, styl
             clip2=get_value_at_index(CLIP_MODEL, 0),
         )
 
-        # Encode text
+        # Encode text with default prompt
         text_encoded = cliptextencode.encode(
-            text=prompt,
+            text="person wearing fashionable clothing",
             clip=get_value_at_index(clip_switch, 0),
         )
         empty_text = cliptextencode.encode(
@@ -328,17 +327,14 @@ def generate_image(prompt, structure_image, style_image, depth_strength=15, styl
         saved_path = f"output/{saved['ui']['images'][0]['filename']}"
         return saved_path
 
-
-
-    output_image = gr.Image(label="Virtual Try-On Result")
-
-
-
 # Create Gradio interface
 examples = [
-    ["person wearing fashionable clothing", "f1.webp", "f11.webp", 15, 0.6],
-    ["person wearing elegant dress", "f2.webp", "f21.webp", 15, 0.5],
-    ["person wearing casual outfit", "f3.webp", "f31.webp", 15, 0.5],
+    ["f1.webp", "f11.webp", 15, 0.6],
+    ["f2.webp", "f21.webp", 15, 0.5],
+    ["f3.webp", "f31.webp", 15, 0.5],
+    ["qq1.webp", "ww1.webp", 15, 0.5],
+    ["qq2.webp", "ww2.webp", 15, 0.5],
+    ["qq3.webp", "ww3.webp", 15, 0.5]
 ]
 
 # Gradio 인터페이스 생성
@@ -353,10 +349,6 @@ with demo:
         with gr.TabItem("👔 Virtual Try-On"):
            with gr.Row():
                with gr.Column():
-                    prompt_input = gr.Textbox(
-                        label="Style Description",
-                        placeholder="Describe the desired style (e.g., 'person wearing elegant dress')"
-                    )
                    with gr.Row():
                        with gr.Group():
                            structure_image = gr.Image(
@@ -390,7 +382,7 @@ with demo:
 
    gr.Examples(
        examples=examples,
-        inputs=[prompt_input, structure_image, style_image, depth_strength, style_strength],
+        inputs=[structure_image, style_image, depth_strength, style_strength],
        outputs=output_image,
        fn=generate_image,
        cache_examples=False
@@ -399,9 +391,9 @@ with demo:
    # Connect the button to the generation function
    generate_button.click(
        fn=generate_image,
-        inputs=[prompt_input, structure_image, style_image, depth_strength, style_strength],
+        inputs=[structure_image, style_image, depth_strength, style_strength],
        outputs=output_image
    )
 
 if __name__ == "__main__":
-    demo.launch(share=True)
+    demo.launch(share=True)
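
Net effect of the commit, shown as a minimal standalone sketch: the free-text prompt box is removed and the prompt is hardcoded inside generate_image, so the Gradio components now pass only the two images and the two strength values. The slider ranges, image labels, and the stubbed pipeline body below are illustrative assumptions, not the actual app.py.

# Minimal sketch of the updated wiring (assumed labels/ranges; ComfyUI pipeline stubbed).
import gradio as gr

def generate_image(structure_image, style_image, depth_strength=15, style_strength=0.5) -> str:
    # Stub: the real function encodes the fixed prompt "person wearing fashionable clothing",
    # runs the ComfyUI graph, and returns the saved image path.
    return structure_image

with gr.Blocks() as demo:
    with gr.Row():
        structure_image = gr.Image(label="Structure Image", type="filepath")  # assumed label
        style_image = gr.Image(label="Style Image", type="filepath")          # assumed label
    depth_strength = gr.Slider(0, 50, value=15, label="Depth Strength")       # assumed range
    style_strength = gr.Slider(0.0, 1.0, value=0.5, label="Style Strength")   # assumed range
    generate_button = gr.Button("Generate")
    output_image = gr.Image(label="Virtual Try-On Result")

    generate_button.click(
        fn=generate_image,
        inputs=[structure_image, style_image, depth_strength, style_strength],
        outputs=output_image,
    )

if __name__ == "__main__":
    demo.launch(share=True)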