Himanshu-AT committed on
Commit 06e4d33 · 1 Parent(s): 3555612

save details

Files changed (1)
  1. app.py +49 -4
app.py CHANGED
@@ -8,6 +8,7 @@ import json
 from PIL import Image
 import torch
 from torchvision import transforms
+import zipfile
 
 from diffusers import FluxFillPipeline, AutoencoderKL
 from PIL import Image
@@ -76,10 +77,9 @@ def calculate_optimal_dimensions(image: Image.Image):
     return width, height
 
 @spaces.GPU(durations=300)
-def infer(edit_images, prompt, lora_model, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
+def infer(edit_images, prompt, lora_model, strength, seed=42, randomize_seed=False, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
     # pipe.enable_xformers_memory_efficient_attention()
 
-
     if lora_model != "None":
         pipe.load_lora_weights(lora_models[lora_model])
         pipe.enable_lora()
@@ -100,6 +100,7 @@ def infer(edit_images, prompt, lora_model, seed=42, randomize_seed=False, guidan
         height=height,
         width=width,
         guidance_scale=guidance_scale,
+        strength=strength,
         num_inference_steps=num_inference_steps,
         generator=torch.Generator(device='cuda').manual_seed(seed),
         # lora_scale=0.75 // not supported in this version
@@ -117,6 +118,35 @@ def download_image(image):
     image.save("output.png", "PNG")
     return "output.png"
 
+def save_details(image, mask, prompt, lora_model, strength, seed, guidance_scale, num_inference_steps):
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+    if isinstance(mask, np.ndarray):
+        mask = Image.fromarray(mask)
+
+    image.save("saved_image.png", "PNG")
+    mask.save("saved_mask.png", "PNG")
+
+    details = {
+        "prompt": prompt,
+        "lora_model": lora_model,
+        "strength": strength,
+        "seed": seed,
+        "guidance_scale": guidance_scale,
+        "num_inference_steps": num_inference_steps
+    }
+
+    with open("details.json", "w") as f:
+        json.dump(details, f)
+
+    # Create a ZIP file
+    with zipfile.ZipFile("output.zip", "w") as zipf:
+        zipf.write("saved_image.png")
+        zipf.write("saved_mask.png")
+        zipf.write("details.json")
+
+    return "output.zip"
+
 def set_image_as_inpaint(image):
     return image
 
@@ -203,7 +233,15 @@ with gr.Blocks(css=css) as demo:
                 value=28,
             )
 
-            # with gr.Row():
+            with gr.Row():
+
+                strength = gr.Slider(
+                    label="Strength",
+                    minimum=0,
+                    maximum=1,
+                    step=0.01,
+                    value=0.85,
+                )
 
             # width = gr.Slider(
             #     label="width",
@@ -224,12 +262,13 @@ with gr.Blocks(css=css) as demo:
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn = infer,
-        inputs = [edit_image, prompt, lora_model, seed, randomize_seed, guidance_scale, num_inference_steps],
+        inputs = [edit_image, prompt, lora_model, strength, seed, randomize_seed, guidance_scale, num_inference_steps],
         outputs = [result, seed]
     )
 
     download_button = gr.Button("Download Image as PNG")
     set_inpaint_button = gr.Button("Set Image as Inpaint")
+    save_button = gr.Button("Save Details")
 
     download_button.click(
        fn=download_image,
@@ -243,6 +282,12 @@ with gr.Blocks(css=css) as demo:
         outputs=[edit_image]
     )
 
+    save_button.click(
+        fn=save_details,
+        inputs=[result, edit_image, prompt, lora_model, strength, seed, guidance_scale, num_inference_steps],
+        outputs=gr.Text(label="Save Status")
+    )
+
     # edit_image.select(
     #     fn=generate_mask,
     #     inputs=[edit_image, gr.Number(), gr.Number()],
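
For reference, and not part of this commit: a minimal sketch of how the output.zip written by save_details could be opened again to recover the saved image, mask, and generation parameters. The archive member names (saved_image.png, saved_mask.png, details.json) come from the function in the diff above; the load_saved_run helper name and the command-line usage are illustrative assumptions, not code from the repository.

# Sketch only -- not part of this commit. Reads back the archive that
# save_details() writes, assuming the member names used in the diff above.
import io
import json
import zipfile

from PIL import Image


def load_saved_run(zip_path="output.zip"):
    """Return (image, mask, details) recovered from a save_details archive."""
    with zipfile.ZipFile(zip_path) as zipf:
        with zipf.open("saved_image.png") as f:
            image = Image.open(io.BytesIO(f.read()))
        with zipf.open("saved_mask.png") as f:
            mask = Image.open(io.BytesIO(f.read()))
        with zipf.open("details.json") as f:
            details = json.load(f)
    return image, mask, details


if __name__ == "__main__":
    # Example usage: print the parameters stored by the "Save Details" button.
    image, mask, details = load_saved_run()
    print(details["prompt"], details["seed"], details["strength"])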