iimmortall committed
Commit 92229a9 · 1 Parent(s): 0b40a67
Files changed (1): app.py (+18 -89)
app.py CHANGED
@@ -22,7 +22,7 @@ model_folder = snapshot_download(repo_id=model_name, token=auth_token, local_dir
 # sys.path.insert(0, model_folder)
 # print(sys.path)
 
-from ultrafusion_utils import load_model, run_ultrafusion
+from ultrafusion_utils import load_model, run_ultrafusion, check_input
 
 to_tensor = ToTensor()
 to_pil = ToPILImage()
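
The new import adds check_input from the Space's local ultrafusion_utils module, whose source is not part of this diff. As a rough guide to what the call site in the next hunk expects, here is a minimal sketch of such a helper, assuming it only downscales inputs whose longer side exceeds max_l while keeping both exposures the same size; the name and signature mirror the call site, but the body is a guess:

from PIL import Image

def check_input(under_expo_img, over_expo_img, max_l=1500):
    # Hypothetical stand-in for ultrafusion_utils.check_input: cap the
    # longer side at max_l, preserve aspect ratio, and keep the two
    # exposures the same size so they can be stacked as tensors later.
    w, h = under_expo_img.size
    if max(w, h) > max_l:
        scale = max_l / max(w, h)
        new_size = (round(w * scale), round(h * scale))
        under_expo_img = under_expo_img.resize(new_size, Image.LANCZOS)
        over_expo_img = over_expo_img.resize(new_size, Image.LANCZOS)
    return under_expo_img, over_expo_img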
@@ -38,36 +38,23 @@ else:
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
-@spaces.GPU(duration=10) #[uncomment to use ZeroGPU]
+@spaces.GPU(duration=60) #[uncomment to use ZeroGPU]
 def infer(
     under_expo_img,
     over_expo_img,
-    # progress=gr.Progress(track_tqdm=True),
+    num_inference_steps
 ):
-    # if randomize_seed:
-    #     seed = random.randint(0, MAX_SEED)
-    # generator = torch.Generator().manual_seed(seed)
-
-    # image = pipe(
-    #     prompt=prompt,
-    #     negative_prompt=negative_prompt,
-    #     guidance_scale=guidance_scale,
-    #     num_inference_steps=num_inference_steps,
-    #     width=width,
-    #     height=height,
-    #     generator=generator,
-    # ).images[0]
-
     print(under_expo_img.size)
     print("reciving image")
 
-    under_expo_img = under_expo_img.resize([1500, 1000])
-    over_expo_img = over_expo_img.resize([1500, 1000])
+    # under_expo_img = under_expo_img.resize([1500, 1000])
+    # over_expo_img = over_expo_img.resize([1500, 1000])
+    under_expo_img, over_expo_img = check_input(under_expo_img, over_expo_img, max_l=1500)
 
     ue = to_tensor(under_expo_img).unsqueeze(dim=0).to("cuda")
    oe = to_tensor(over_expo_img).unsqueeze(dim=0).to("cuda")
 
-    out = run_ultrafusion(ue, oe, 'test', flow_model=flow_model, pipe=ultrafusion_pipe, consistent_start=None)
+    out = run_ultrafusion(ue, oe, 'test', flow_model=flow_model, pipe=ultrafusion_pipe, steps=num_inference_steps, consistent_start=None)
 
     out = out.clamp(0, 1).squeeze()
     out_pil = to_pil(out)
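
Taken together, this hunk swaps the hard-coded 1500x1000 resize for the size check and lets the UI drive the denoising step count, with the ZeroGPU window widened from 10 s to 60 s to cover longer schedules. A quick smoke test of the new signature, assuming infer returns the fused result as a PIL image (the function's tail is outside this hunk) and using hypothetical example paths:

from PIL import Image

# Hypothetical local check, bypassing Gradio; needs a CUDA device and
# the weights that snapshot_download fetches at startup.
ue = Image.open("examples/under_exposed.jpg")
oe = Image.open("examples/over_exposed.jpg")
fused = infer(ue, oe, 20)  # 20 steps, matching the new slider default
fused.save("fused.png")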
@@ -143,84 +130,26 @@ with gr.Blocks(css=css) as demo:
                 width=IMG_W*2,
                 height=IMG_H*2,
             )
-
-        # with gr.Row():
-        #     prompt = gr.Text(
-        #         label="Prompt",
-        #         show_label=False,
-        #         max_lines=1,
-        #         placeholder="Enter your prompt",
-        #         container=False,
-        #     )
-        #     negative_prompt = gr.Text(
-        #         label="Negative prompt",
-        #         max_lines=1,
-        #         placeholder="Enter a negative prompt",
-        #         visible=False,
-        #     )
-        # with gr.Accordion("Advanced Settings", open=False):
-        #     negative_prompt = gr.Text(
-        #         label="Negative prompt",
-        #         max_lines=1,
-        #         placeholder="Enter a negative prompt",
-        #         visible=False,
-        #     )
-
-        #     seed = gr.Slider(
-        #         label="Seed",
-        #         minimum=0,
-        #         maximum=MAX_SEED,
-        #         step=1,
-        #         value=0,
-        #     )
-
-        #     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-        #     with gr.Row():
-        #         width = gr.Slider(
-        #             label="Width",
-        #             minimum=256,
-        #             maximum=MAX_IMAGE_SIZE,
-        #             step=32,
-        #             value=1024, # Replace with defaults that work for your model
-        #         )
-
-        #         height = gr.Slider(
-        #             label="Height",
-        #             minimum=256,
-        #             maximum=MAX_IMAGE_SIZE,
-        #             step=32,
-        #             value=1024, # Replace with defaults that work for your model
-        #         )
-
-        #     with gr.Row():
-        #         guidance_scale = gr.Slider(
-        #             label="Guidance scale",
-        #             minimum=0.0,
-        #             maximum=10.0,
-        #             step=0.1,
-        #             value=0.0, # Replace with defaults that work for your model
-        #         )
-
-        #         num_inference_steps = gr.Slider(
-        #             label="Number of inference steps",
-        #             minimum=1,
-        #             maximum=50,
-        #             step=1,
-        #             value=2, # Replace with defaults that work for your model
-        #         )
+        with gr.Accordion("Advanced Settings", open=True):
+            num_inference_steps = gr.Slider(
+                label="Number of inference steps",
+                minimum=2,
+                maximum=50,
+                step=1,
+                value=20, # Replace with defaults that work for your model
+                interactive=True
+            )
 
         gr.Examples(
             examples=examples,
             inputs=[under_expo_img, over_expo_img],
             label="Examples",
             # examples_per_page=10,
-            cache_examples=False,
-            # fn=infer,
+            cache_examples=True,
         )
     # gr.Markdown(_CITE_)
     run_button.click(fn=infer,
-                     inputs=[under_expo_img, over_expo_img],
+                     inputs=[under_expo_img, over_expo_img, num_inference_steps],
                      outputs=[result,],
                     )
 
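
One caveat on the switch to cache_examples=True: Gradio can only pre-compute example outputs when gr.Examples also knows what to run, so caching is normally wired with fn and outputs as well. A sketch of that wiring, using a hypothetical wrapper that pins the step count because the example tuples carry only the two images:

def infer_example(under_expo_img, over_expo_img):
    # Hypothetical wrapper: examples supply just the two exposures, so
    # fix num_inference_steps at the slider default for cache generation.
    return infer(under_expo_img, over_expo_img, 20)

gr.Examples(
    examples=examples,
    inputs=[under_expo_img, over_expo_img],
    label="Examples",
    fn=infer_example,
    outputs=[result],
    cache_examples=True,
)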