AsmaAILab committed (verified)
Commit 44953c4 · Parent(s): e9be03d

Update app.py

Files changed (1): app.py (+12, -32)
app.py CHANGED

@@ -118,8 +118,11 @@ def estimate_depth(pil_image: Image.Image) -> Image.Image:
 
 
 def generate_image_for_gradio(
+    prompt: str,
     input_image_for_depth: Image.Image,
-    prompt: str
+    num_inference_step: int,
+    guidance_scale: float,
+
 ) -> Image.Image:
 
     global pipeline
@@ -152,49 +155,26 @@ def generate_image_for_gradio(
     generated_images = pipeline(
         prompt,
         image=input_image_for_pipeline,
-        num_inference_steps=25,
-        guidance_scale=8.0,
+        num_inference_steps=num_inference_step,
+        guidance_scale=guidance_scale,
         generator=generator,
     ).images
 
-    # with torch.no_grad():
-    #     generated_images = pipeline(
-    #         prompt,
-    #         negative_prompt,
-    #         image=input_image_for_pipeline,
-    #         num_inference_steps=25,
-    #         # guidance_scale=8.0,
-    #         strength=0.85,
-    #         generator=generator,
-    #     ).images
-
+
     print(f"Image generation complete (seed: {seed}).")
     return generated_images[0]
 
 
 
-# iface = gr.Interface(
-#     fn=generate_image_for_gradio,
-#     inputs=[
-#         gr.Textbox(label="Prompt", value="a high-quality photo of a modern interior design"),
-#         gr.Image(type="pil", label="Input Image (for Depth Estimation)"),
-#         gr.Slider(minimum=10, maximum=100, value=25, step=1, label="Inference Steps"),
-#         gr.Slider(minimum=1.0, maximum=20.0, value=8.0, step=0.5, label="Guidance Scale"),
-#         gr.Number(label="Seed (optional, leave blank for random)", value=None),
-#         gr.Number(label="Resolution", value=512, interactive=False)
-#     ],
-#     outputs=gr.Image(type="pil", label="Generated Image"),
-#     title="Stable Diffusion ControlNet Depth Demo (with Depth Estimation)",
-#     description="Upload an input image, and the app will estimate its depth map, then use it with your prompt to generate a new image. This allows for structural guidance from your input photo.",
-#     allow_flagging="never",
-#     live=False,
-#     theme=Soft(),
-
 iface = gr.Interface(
     fn=generate_image_for_gradio,
     inputs=[
-        gr.Image(type="pil", label="Input Image (for Depth Estimation)"),
         gr.Textbox(label="Prompt", value="a high-quality photo of a modern interior design"),
+        gr.Image(type="pil", label="Input Image (for Depth Estimation)"),
+        gr.Slider(minimum=10, maximum=100, value=25, step=1, label="Inference Steps"),
+        gr.Slider(minimum=1.0, maximum=20.0, value=8.0, step=0.5, label="Guidance Scale"),
+        # gr.Number(label="Seed (optional, leave blank for random)", value=None),
+        # gr.Number(label="Resolution", value=512, interactive=False)
     ],
     outputs=gr.Image(type="pil", label="Generated Image"),
     title="Stable Diffusion ControlNet Depth Demo (with Depth Estimation)",