Himanshu806 committed on
Commit
50d6b20
·
verified ·
1 Parent(s): e297481

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +98 -37
app.py CHANGED
@@ -1,63 +1,73 @@
1
  import gradio as gr
2
- import spaces
3
  import numpy as np
 
 
4
  import torch
 
5
  import random
 
6
  from diffusers import FluxInpaintPipeline
7
  from PIL import Image
8
 
 
9
  MAX_SEED = np.iinfo(np.int32).max
10
  MAX_IMAGE_SIZE = 2048
11
 
12
- # Load pipeline with VAE enabled
13
- pipe = FluxInpaintPipeline.from_pretrained(
14
- "black-forest-labs/FLUX.1-Fill-dev"
15
- # torch_dtype=torch.bfloat16
16
- ).to("cuda")
17
  pipe.load_lora_weights("alvdansen/flux-koda")
 
 
18
  pipe.enable_lora()
19
- pipe.vae.enable_slicing() # Enable slicing for better memory efficiency
20
- pipe.vae.enable_tiling() # Enable tiling for larger images
21
 
22
  def calculate_optimal_dimensions(image: Image.Image):
 
23
  original_width, original_height = image.size
 
 
24
  MIN_ASPECT_RATIO = 9 / 16
25
  MAX_ASPECT_RATIO = 16 / 9
26
  FIXED_DIMENSION = 1024
27
 
 
28
  original_aspect_ratio = original_width / original_height
29
 
30
- if original_aspect_ratio > 1:
 
31
  width = FIXED_DIMENSION
32
  height = round(FIXED_DIMENSION / original_aspect_ratio)
33
- else:
34
  height = FIXED_DIMENSION
35
  width = round(FIXED_DIMENSION * original_aspect_ratio)
36
 
 
37
  width = (width // 8) * 8
38
  height = (height // 8) * 8
39
 
 
40
  calculated_aspect_ratio = width / height
41
  if calculated_aspect_ratio > MAX_ASPECT_RATIO:
42
  width = (height * MAX_ASPECT_RATIO // 8) * 8
43
  elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
44
  height = (width / MIN_ASPECT_RATIO // 8) * 8
45
 
 
46
  width = max(width, 576) if width == FIXED_DIMENSION else width
47
  height = max(height, 576) if height == FIXED_DIMENSION else height
48
 
49
  return width, height
50
 
51
- @spaces.GPU
52
- def infer(edit_images, prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28):
 
 
53
  image = edit_images["background"]
54
  width, height = calculate_optimal_dimensions(image)
55
  mask = edit_images["layers"][0]
56
  if randomize_seed:
57
  seed = random.randint(0, MAX_SEED)
58
-
59
- # Run the inpainting pipeline
60
- output = pipe(
61
  prompt=prompt,
62
  image=image,
63
  mask_image=mask,
@@ -66,15 +76,23 @@ def infer(edit_images, prompt, seed=42, randomize_seed=False, width=1024, height
66
  guidance_scale=guidance_scale,
67
  num_inference_steps=num_inference_steps,
68
  generator=torch.Generator(device='cuda').manual_seed(seed),
69
- )
 
70
 
71
- output_image = output.images[0]
72
- output_image_jpg = output_image.convert("RGB")
73
  output_image_jpg.save("output.jpg", "JPEG")
74
 
75
  return output_image_jpg, seed
 
76
 
77
- css = """
 
 
 
 
 
 
 
78
  #col-container {
79
  margin: 0 auto;
80
  max-width: 1000px;
@@ -82,44 +100,87 @@ css = """
82
  """
83
 
84
  with gr.Blocks(css=css) as demo:
 
85
  with gr.Column(elem_id="col-container"):
86
- gr.Markdown("# FLUX.1 [dev]")
 
87
  with gr.Row():
88
  with gr.Column():
89
  edit_image = gr.ImageEditor(
90
- label="Upload and draw mask for inpainting",
91
- type="pil",
92
  sources=["upload", "webcam"],
93
- image_mode="RGB",
94
- layers=True,
95
  brush=gr.Brush(colors=["#FFFFFF"]),
 
96
  )
97
- prompt = gr.Textbox(
98
  label="Prompt",
99
  show_label=False,
100
  max_lines=2,
101
  placeholder="Enter your prompt",
 
102
  )
103
  run_button = gr.Button("Run")
104
 
105
  result = gr.Image(label="Result", show_label=False)
106
 
107
  with gr.Accordion("Advanced Settings", open=False):
 
108
  seed = gr.Slider(
109
- label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0
 
 
 
 
110
  )
 
111
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
112
- guidance_scale = gr.Slider(
113
- label="Guidance Scale", minimum=1, maximum=30, step=0.5, value=3.5
114
- )
115
- num_inference_steps = gr.Slider(
116
- label="Number of inference steps", minimum=1, maximum=50, step=1, value=28
117
- )
118
 
119
- run_button.click(
120
- fn=infer,
121
- inputs=[edit_image, prompt, seed, randomize_seed, guidance_scale, num_inference_steps],
122
- outputs=[result, seed],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
  )
124
 
125
- demo.launch()
 
1
import random

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import FluxInpaintPipeline
from PIL import Image

# Largest value usable as a reproducible RNG seed (fits in an int32).
MAX_SEED = np.iinfo(np.int32).max
# Upper bound for the (hidden) width/height sliders in the UI.
MAX_IMAGE_SIZE = 2048
15
 
16
# Load the FLUX.1 Fill inpainting pipeline onto the GPU. bfloat16 halves the
# memory footprint versus float32 while remaining adequate for inference.
pipe = FluxInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16
).to("cuda")
# Apply the "koda" photography-style LoRA on top of the base weights.
pipe.load_lora_weights("alvdansen/flux-koda")
pipe.enable_lora()
23
 
24
def calculate_optimal_dimensions(image: "Image.Image"):
    """Choose generation dimensions for ``image``.

    The longer side is fixed at 1024 px and the shorter side is scaled to
    preserve the original aspect ratio; both sides are then rounded down to
    multiples of 8 (required by the diffusion model), and the final aspect
    ratio is clamped to [9/16, 16/9].

    Args:
        image: Any object exposing a PIL-style ``size`` attribute, i.e. a
            ``(width, height)`` pair of pixel dimensions.

    Returns:
        tuple[int, int]: ``(width, height)``, both ints and multiples of 8.
    """
    original_width, original_height = image.size

    MIN_ASPECT_RATIO = 9 / 16
    MAX_ASPECT_RATIO = 16 / 9
    FIXED_DIMENSION = 1024

    original_aspect_ratio = original_width / original_height

    # Fix the longer dimension at 1024 and scale the other proportionally.
    if original_aspect_ratio > 1:  # landscape
        width = FIXED_DIMENSION
        height = round(FIXED_DIMENSION / original_aspect_ratio)
    else:  # portrait or square
        height = FIXED_DIMENSION
        width = round(FIXED_DIMENSION * original_aspect_ratio)

    # Snap both sides down to multiples of 8.
    width = (width // 8) * 8
    height = (height // 8) * 8

    # Clamp extreme aspect ratios. BUG FIX: the original expressions produced
    # *float* dimensions here (float multiplication/division survives `// 8`),
    # which downstream image pipelines reject — wrap in int().
    calculated_aspect_ratio = width / height
    if calculated_aspect_ratio > MAX_ASPECT_RATIO:
        width = int(height * MAX_ASPECT_RATIO // 8) * 8
    elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
        height = int(width / MIN_ASPECT_RATIO // 8) * 8

    # Keep the fixed side from collapsing below 576 px. NOTE(review): this is
    # a no-op while FIXED_DIMENSION >= 576; preserved for behavioural parity.
    width = max(width, 576) if width == FIXED_DIMENSION else width
    height = max(height, 576) if height == FIXED_DIMENSION else height

    return width, height
60
 
61
# BUG FIX: spaces.GPU takes `duration`, not `durations`; the misspelled kwarg
# raises a TypeError under ZeroGPU.
@spaces.GPU(duration=300)
def infer(edit_images, prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
    """Inpaint the masked region of the uploaded image with ``prompt``.

    Args:
        edit_images: gr.ImageEditor payload; ``"background"`` is the source
            image, ``"layers"[0]`` the user-drawn mask.
        prompt: Text prompt describing the desired content.
        seed: RNG seed; replaced by a random one when ``randomize_seed``.
        randomize_seed: Draw a fresh seed in [0, MAX_SEED] when True.
        width, height: Ignored — dimensions are recomputed from the image.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Number of denoising steps.
        progress: Gradio progress tracker (mirrors the pipeline's tqdm bar).

    Returns:
        tuple: (RGB PIL image with the inpainted result, seed actually used).
    """
    image = edit_images["background"]
    # The caller-supplied width/height are overridden by dimensions derived
    # from the uploaded image so the model always sees valid multiples of 8.
    width, height = calculate_optimal_dimensions(image)
    mask = edit_images["layers"][0]
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    image = pipe(
        prompt=prompt,
        image=image,
        mask_image=mask,
        # NOTE(review): these two lines are hidden in the diff context;
        # reconstructed as the computed dims — confirm against the full file.
        height=height,
        width=width,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=torch.Generator(device="cuda").manual_seed(seed),
    ).images[0]

    # Flatten to RGB and persist a JPEG copy alongside the app.
    output_image_jpg = image.convert("RGB")
    output_image_jpg.save("output.jpg", "JPEG")

    return output_image_jpg, seed
87
 
88
# Example prompts. NOTE(review): this list is not wired into the interface
# (no gr.Examples component references it) — confirm before removing.
examples = [
    "photography of a young woman, accent lighting, (front view:1.4), ",
]
94
+
95
css = """
#col-container {
    margin: 0 auto;
    max-width: 1000px;
}
"""

# Build the Gradio UI: image editor + prompt on the left, result on the
# right, advanced sliders in a collapsed accordion.
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# FLUX.1 [dev]")
        with gr.Row():
            with gr.Column():
                edit_image = gr.ImageEditor(
                    label="Upload and draw mask for inpainting",
                    type="pil",
                    sources=["upload", "webcam"],
                    image_mode="RGB",
                    layers=False,
                    brush=gr.Brush(colors=["#FFFFFF"]),
                )
                prompt = gr.Text(
                    label="Prompt",
                    show_label=False,
                    max_lines=2,
                    placeholder="Enter your prompt",
                    container=False,
                )
                run_button = gr.Button("Run")

            result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                # Hidden: infer() recomputes dimensions from the image, but
                # the components must exist to fill infer()'s input slots.
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                    visible=False,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                    visible=False,
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance Scale",
                    minimum=1,
                    maximum=30,
                    step=0.5,
                    # BUG FIX: the original default (50) exceeded the slider's
                    # maximum (30); restore the pipeline default of 3.5.
                    value=3.5,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=28,
                )

        # Run on button click or Enter in the prompt box.
        gr.on(
            triggers=[run_button.click, prompt.submit],
            fn=infer,
            inputs=[edit_image, prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
            outputs=[result, seed],
        )

demo.launch()