fountai commited on
Commit
f86ec0c
·
1 Parent(s): a43c8d9

add downloader

Browse files
Files changed (1) hide show
  1. app.py +9 -13
app.py CHANGED
@@ -1,20 +1,20 @@
1
- import torch.multiprocessing as mp
2
- import multiprocessing as mp2
3
- from huggingface_hub import login
4
-
5
- mp.set_start_method('spawn', force=True)
6
- mp2.set_start_method('spawn', force=True)
7
 
 
 
8
  import spaces
9
  import gradio as gr
10
  from PIL import Image
11
  import os
12
 
13
  import random
14
- login()
 
 
 
 
15
 
16
  @spaces.GPU(duration=300)
17
- def process_image(image, prompt, steps, use_lora, use_controlnet, use_depth, use_hed, use_ip, lora_name, lora_path, lora_weight, negative_image, neg_prompt, true_gs, guidance, cfg):
18
  from src.flux.xflux_pipeline import XFluxPipeline
19
  def run_xflux_pipeline(
20
  prompt, image, repo_id, name, device,
@@ -111,9 +111,7 @@ def process_image(image, prompt, steps, use_lora, use_controlnet, use_depth, use
111
  neg_prompt=neg_prompt,
112
  image=image,
113
  negative_image=negative_image,
114
- lora_name=lora_name,
115
  lora_weight=lora_weight,
116
- lora_repo_id=lora_path,
117
  control_type="depth" if use_depth else "hed" if use_hed else "canny",
118
  repo_id="XLabs-AI/flux-controlnet-collections",
119
  name="flux-depth-controlnet.safetensors",
@@ -209,8 +207,6 @@ with gr.Blocks(css=custom_css) as demo:
209
  use_depth = gr.Checkbox(label="Use depth")
210
  use_hed = gr.Checkbox(label="Use hed")
211
  use_lora = gr.Checkbox(label="Use LORA", value=True)
212
- lora_path = gr.Textbox(label="LoraPath", value="XLabs-AI/flux-lora-collection")
213
- lora_name = gr.Textbox(label="LoraName", value="realism_lora.safetensors")
214
  lora_weight = gr.Slider(step=0.1, minimum=0, maximum=1, value=0.7, label="Lora Weight")
215
 
216
  true_gs = gr.Slider(step=0.1, minimum=0, maximum=10, value=3.5, label="TrueGs")
@@ -223,7 +219,7 @@ with gr.Blocks(css=custom_css) as demo:
223
  with gr.Column(scale=2, elem_classes="app"):
224
  output = gr.Gallery(label="Galery output", elem_classes="galery", selected_index=0)
225
 
226
- submit_btn.click(process_image, inputs=[input_image, prompt, steps, use_lora, controlnet, use_depth, use_hed, use_ip, lora_name, lora_path, lora_weight, negative_image, neg_prompt, true_gs, guidance, cfg], outputs=output)
227
 
228
  if __name__ == '__main__':
229
  demo.launch(share=True, debug=True)
 
 
 
 
 
 
 
1
 
2
+ from huggingface_hub import hf_hub_download, login
3
+ import os
4
  import spaces
5
  import gradio as gr
6
  from PIL import Image
7
  import os
8
 
9
  import random
10
+ login(os.getenv("HF_TOKEN"))
11
+ hf_hub_download("black-forest-labs/FLUX.1-dev", "flux1-dev.safetensors")
12
+ hf_hub_download("XLabs-AI/flux-controlnet-collections", "flux-depth-controlnet.safetensors")
13
+ hf_hub_download("XLabs-AI/flux-ip-adapter", "flux-ip-adapter.safetensors")
14
+ hf_hub_download("XLabs-AI/flux-controlnet-canny", "controlnet.safetensors")
15
 
16
  @spaces.GPU(duration=300)
17
+ def process_image(image, prompt, steps, use_lora, use_controlnet, use_depth, use_hed, use_ip, lora_weight, negative_image, neg_prompt, true_gs, guidance, cfg):
18
  from src.flux.xflux_pipeline import XFluxPipeline
19
  def run_xflux_pipeline(
20
  prompt, image, repo_id, name, device,
 
111
  neg_prompt=neg_prompt,
112
  image=image,
113
  negative_image=negative_image,
 
114
  lora_weight=lora_weight,
 
115
  control_type="depth" if use_depth else "hed" if use_hed else "canny",
116
  repo_id="XLabs-AI/flux-controlnet-collections",
117
  name="flux-depth-controlnet.safetensors",
 
207
  use_depth = gr.Checkbox(label="Use depth")
208
  use_hed = gr.Checkbox(label="Use hed")
209
  use_lora = gr.Checkbox(label="Use LORA", value=True)
 
 
210
  lora_weight = gr.Slider(step=0.1, minimum=0, maximum=1, value=0.7, label="Lora Weight")
211
 
212
  true_gs = gr.Slider(step=0.1, minimum=0, maximum=10, value=3.5, label="TrueGs")
 
219
  with gr.Column(scale=2, elem_classes="app"):
220
  output = gr.Gallery(label="Galery output", elem_classes="galery", selected_index=0)
221
 
222
+ submit_btn.click(process_image, inputs=[input_image, prompt, steps, use_lora, controlnet, use_depth, use_hed, use_ip, lora_weight, negative_image, neg_prompt, true_gs, guidance, cfg], outputs=output)
223
 
224
  if __name__ == '__main__':
225
  demo.launch(share=True, debug=True)