Deadmon committed on
Commit
1368e65
·
verified ·
1 Parent(s): 754b60e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -21
app.py CHANGED
@@ -1,33 +1,58 @@
1
  import torch
2
  from diffusers.utils import load_image
3
  from diffusers import FluxControlNetPipeline, FluxControlNetModel, FluxMultiControlNetModel
 
 
4
 
5
  base_model = 'black-forest-labs/FLUX.1-dev'
6
  controlnet_model_union = 'InstantX/FLUX.1-dev-Controlnet-Union'
7
 
8
  controlnet_union = FluxControlNetModel.from_pretrained(controlnet_model_union, torch_dtype=torch.bfloat16)
9
- controlnet = FluxMultiControlNetModel([controlnet_union]) # we always recommend loading via FluxMultiControlNetModel
10
 
11
  pipe = FluxControlNetPipeline.from_pretrained(base_model, controlnet=controlnet, torch_dtype=torch.bfloat16)
12
  pipe.to("cuda")
13
 
14
- prompt = 'A bohemian-style female travel blogger with sun-kissed skin and messy beach waves.'
15
- control_image_depth = load_image("https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Union/resolve/main/images/depth.jpg")
16
- control_mode_depth = 2
17
-
18
- control_image_canny = load_image("https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Union/resolve/main/images/canny.jpg")
19
- control_mode_canny = 0
20
-
21
- width, height = control_image.size
22
-
23
- image = pipe(
24
- prompt,
25
- control_image=[control_image_depth, control_image_canny],
26
- control_mode=[control_mode_depth, control_mode_canny],
27
- width=width,
28
- height=height,
29
- controlnet_conditioning_scale=[0.2, 0.4],
30
- num_inference_steps=24,
31
- guidance_scale=3.5,
32
- generator=torch.manual_seed(42),
33
- ).images[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import torch
2
  from diffusers.utils import load_image
3
  from diffusers import FluxControlNetPipeline, FluxControlNetModel, FluxMultiControlNetModel
4
+ import gradio as gr
5
+ import spaces
6
 
7
# --- Model setup -----------------------------------------------------------
# Identifiers for the FLUX.1-dev base model and the InstantX Union ControlNet.
base_model = 'black-forest-labs/FLUX.1-dev'
controlnet_model_union = 'InstantX/FLUX.1-dev-Controlnet-Union'

# Load the single Union ControlNet in bfloat16 and wrap it in a
# multi-ControlNet container so several conditionings can be passed at once.
controlnet_union = FluxControlNetModel.from_pretrained(controlnet_model_union, torch_dtype=torch.bfloat16)
controlnet = FluxMultiControlNetModel([controlnet_union])  # we always recommend loading via FluxMultiControlNetModel

# Build the generation pipeline around the base model and move it to the GPU.
pipe = FluxControlNetPipeline.from_pretrained(base_model, controlnet=controlnet, torch_dtype=torch.bfloat16)
pipe.to("cuda")
15
 
16
# Union ControlNet mode names, ordered by their integer mode index
# (canny=0, tile=1, depth=2, blur=3, pose=4, gray=5, lq=6).
control_modes = [
    "canny",  # 0
    "tile",   # 1
    "depth",  # 2
    "blur",   # 3
    "pose",   # 4
    "gray",   # 5
    "lq"      # 6
]

@spaces.GPU
def generate_image(prompt, control_image_depth, control_mode_depth, control_image_canny, control_mode_canny):
    """Generate an image with the Flux multi-ControlNet pipeline.

    Args:
        prompt: Text prompt describing the desired image.
        control_image_depth: PIL image used as the depth conditioning
            (also determines the output width/height).
        control_mode_depth: Union control mode for the depth image — a mode
            name from ``control_modes`` (e.g. "depth") or its integer index.
        control_image_canny: PIL image used as the canny conditioning.
        control_mode_canny: Union control mode for the canny image (name or index).

    Returns:
        The first generated PIL image.
    """
    # BUG FIX: the Gradio dropdowns supply mode *names* (strings), but the
    # pipeline expects integer mode indices. Translate names via their position
    # in `control_modes`; integers are accepted unchanged for backward compat.
    def _mode_index(mode):
        return mode if isinstance(mode, int) else control_modes.index(mode)

    # Output resolution follows the depth control image.
    width, height = control_image_depth.size

    image = pipe(
        prompt,
        control_image=[control_image_depth, control_image_canny],
        control_mode=[_mode_index(control_mode_depth), _mode_index(control_mode_canny)],
        width=width,
        height=height,
        controlnet_conditioning_scale=[0.2, 0.4],
        num_inference_steps=24,
        guidance_scale=3.5,
        generator=torch.manual_seed(42),  # fixed seed for reproducible output
    ).images[0]

    return image
43
+
44
# Gradio UI wiring: one text prompt, two control images with their mode
# dropdowns, and a single generated image as output.
iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Text(label="Prompt"),
        # BUG FIX: type="pil" makes Gradio hand the function PIL images.
        # The default (numpy array) has no `.size` attribute and is not what
        # the diffusers pipeline expects, so generation would crash.
        gr.Image(label="Control Image (Depth)", type="pil"),
        gr.Dropdown(choices=control_modes, value="depth", label="Control Mode (Depth)"),
        gr.Image(label="Control Image (Canny)", type="pil"),
        gr.Dropdown(choices=control_modes, value="canny", label="Control Mode (Canny)")
    ],
    outputs=gr.Image(label="Generated Image"),
    title="FluxControlNet Image Generation",
    description="Generate an image using FluxControlNet with depth and canny control images.",
)

iface.launch()