Commit 87be4e2 (verified) Β· committed by iimmortall
Parent(s): ced491a

Update app.py

Files changed (1):
  1. app.py +134 -107
app.py CHANGED
@@ -14,15 +14,13 @@ from huggingface_hub import hf_hub_download, snapshot_download
 
 model_name = "iimmortall/UltraFusion"
 auth_token = os.getenv("HF_AUTH_TOKEN")
-# greet_file = hf_hub_download(repo_id=model_name, filename="main.py", use_auth_token=auth_token)
-# sys.path.append(os.path.split(greet_file)[0])
+# greet_file = hf_hub_download(repo_id=model_name, filename="main.py", use_auth_token=auth_token)
 model_folder = snapshot_download(repo_id=model_name, token=auth_token, local_dir="/home/user/app")
-# sys.path.append(model_folder)
-# sys.path.insert(0, model_folder)
-# print(sys.path)
 
 from ultrafusion_utils import load_model, run_ultrafusion, check_input
 
+RUN_TIMES = 0
+
 to_tensor = ToTensor()
 to_pil = ToPILImage()
 ultrafusion_pipe, flow_model = load_model()
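This hunk settles on downloading the whole repo snapshot into the Space's app root, so `ultrafusion_utils` imports directly and the commented-out `sys.path` experiments can go. A minimal sketch of the same pattern for the general case, where the snapshot lands in the default HF cache instead of the app root (the repo id and module names are taken from this app; the rest is standard `huggingface_hub` usage):

```python
import os
import sys

from huggingface_hub import snapshot_download

# Fetch the full repo snapshot; the token is required for private repos.
folder = snapshot_download(
    repo_id="iimmortall/UltraFusion",
    token=os.getenv("HF_AUTH_TOKEN"),
)

# A snapshot in the default cache is not on the import path, so add it
# before importing. Pinning local_dir="/home/user/app" (as app.py does)
# makes this step unnecessary.
if folder not in sys.path:
    sys.path.insert(0, folder)

from ultrafusion_utils import load_model, run_ultrafusion, check_input
```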
@@ -33,7 +31,6 @@ if torch.cuda.is_available():
 else:
     torch_dtype = torch.float32
 
-
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
@@ -45,6 +42,7 @@ def infer(
 ):
     print(under_expo_img.size)
     print("reciving image")
+    # print(under_expo_img.orig_name, over_expo_img.orig_name)
 
     # under_expo_img = under_expo_img.resize([1500, 1000])
     # over_expo_img = over_expo_img.resize([1500, 1000])
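The hunk above only adds a commented-out debug print, but `infer` still assumes the two uploads share one resolution; the page notes promise an error otherwise, and `check_input` is imported from `ultrafusion_utils` for that purpose. A sketch of what such a guard can look like with Gradio's user-visible errors (the helper name is hypothetical; `check_input`'s real behavior lives in the repo and may differ):

```python
import gradio as gr
from PIL import Image

def ensure_same_size(under: Image.Image, over: Image.Image) -> None:
    """Hypothetical stand-in for check_input: fail fast with a UI error
    instead of letting a tensor shape mismatch surface from CUDA."""
    if under.size != over.size:
        raise gr.Error(
            f"Input sizes differ: {under.size} vs {over.size}. "
            "Please upload two images with the same resolution."
        )
```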
@@ -52,119 +50,148 @@
 
     ue = to_tensor(under_expo_img).unsqueeze(dim=0).to("cuda")
     oe = to_tensor(over_expo_img).unsqueeze(dim=0).to("cuda")
+    print("num_inference_steps:", num_inference_steps)
+    try:
+        if num_inference_steps is None:
+            num_inference_steps = 20
+        num_inference_steps = int(num_inference_steps)
+    except Exception as e:
+        num_inference_steps = 20
 
-    out = run_ultrafusion(ue, oe, 'test', flow_model=flow_model, pipe=ultrafusion_pipe, steps=num_inference_steps, consistent_start=None)
+    out = run_ultrafusion(ue, oe, 'test', flow_model=flow_model, pipe=ultrafusion_pipe,
+                          steps=num_inference_steps, consistent_start=None)
 
     out = out.clamp(0, 1).squeeze()
     out_pil = to_pil(out)
 
+    global RUN_TIMES
+    RUN_TIMES = RUN_TIMES + 1
+    print("---------------------------- Using Times---------------------------------------")
+    print(f"{datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}: Using times: {RUN_TIMES}")
+
     return out_pil
 
 
-examples= [
-    [os.path.join("examples", img_name, "ue.jpg"),
-     os.path.join("examples", img_name, "oe.jpg")] for img_name in sorted(os.listdir("examples"))
-]
-IMG_W = 320
-IMG_H = 240
-css = """
-#col-container {
-    margin: 0 auto;
-    max-width: 640px;
-}
-"""
-# max-heigh: 1500px;
-
-_HEADER_ = r"""
-<h1 style="text-align: center;"><b>UltraFusion</b></h1>
-
-- This is an HDR algorithm that fuses two images with different exposures.
-
-- This can fuse two images with a very large exposure difference, even up to 9 stops.
-
-- The maximum resolution we support is 1500 x 1500. If the images you upload are larger than this, they will be downscaled while maintaining the original aspect ratio.
-
-- The two input images should have the same resolution; otherwise, an error will be reported.
-
-- This is only for internal testing. Do not share it publicly.
-"""
-
-_CITE_ = r"""
-πŸ“ **Citation**
-
-If you find our work useful for your research or applications, please cite using this bibtex:
-```bibtex
-@article{xxx,
-    title={xxx},
-    author={xxx},
-    journal={arXiv preprint arXiv:xx.xx},
-    year={2024}
-}
-```
-
-πŸ“‹ **License**
-
-CC BY-NC 4.0. LICENSE.
-
-πŸ“§ **Contact**
-
-If you have any questions, feel free to open a discussion or contact us at <b>[email protected]</b>.
-"""
-
-with gr.Blocks(css=css) as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(_HEADER_)
-        with gr.Row():
-            under_expo_img = gr.Image(label="UnderExposureImage", show_label=True,
-                                      image_mode="RGB",
-                                      sources=["upload", ],
-                                      width=IMG_W,
-                                      height=IMG_H,
-                                      type="pil"
-                                      )
-            over_expo_img = gr.Image(label="OverExposureImage", show_label=True,
-                                     image_mode="RGB",
-                                     sources=["upload", ],
-                                     width=IMG_W,
-                                     height=IMG_H,
-                                     type="pil"
-                                     )
-        with gr.Row():
-            run_button = gr.Button("Run", variant="primary") # scale=0,
+def build_demo():
+    examples= [
+        [os.path.join("examples", img_name, "ue.jpg"),
+         os.path.join("examples", img_name, "oe.jpg")] for img_name in sorted(os.listdir("examples"))
+    ]
+    IMG_W = 320
+    IMG_H = 240
+    css = """
+    #col-container {
+        margin: 0 auto;
+        max-width: 640px;
+    }
+    """
+    # max-heigh: 1500px;
+
+    _README_ = r"""
+
+- This is an HDR algorithm that fuses two images with different exposures.
+
+- This can fuse two images with a very large exposure difference, even up to 9 stops.
+
+- The two input images should have the same resolution; otherwise, an error will be reported.
+
+- We are committed to not storing any data you upload or the results of its processing.
+
+    """
+    # - The maximum resolution we support is 1500 x 1500. If the images you upload are larger than this, they will be downscaled while maintaining the original aspect ratio.
+    # - This is only for internal testing. Do not share it publicly.
+    _CITE_ = r"""
+πŸ“ **Citation**
+
+If you find our work useful for your research or applications, please cite using this bibtex:
+```bibtex
+@article{xxx,
+    title={xxx},
+    author={xxx},
+    journal={arXiv preprint arXiv:xx.xx},
+    year={2024}
+}
+```
+
+πŸ“‹ **License**
+
+CC BY-NC 4.0. LICENSE.
+
+πŸ“§ **Contact**
+
+If you have any questions, feel free to open a discussion or contact us at <b>[email protected]</b>.
+    """
+
+    with gr.Blocks(css=css) as demo:
+        with gr.Column(elem_id="col-container"):
+            gr.Markdown("""<h1 style="text-align: center; font-size: 32px;"><b>UltraFusion πŸ“Έβœ¨</b></h1>""")
+            gr.Markdown("""<h1 style="text-align: center; font-size: 24px;"><b>How to Capture Short and Long Exposure Images</b></h1>""")
+            with gr.Row():
+                gr.Image("ui/en-short.png", width=IMG_W*2//3, show_label=False, interactive=False, show_download_button=False) #, height=IMG_H*2
+                gr.Image("ui/en-tap.png", width=IMG_W*2//3, show_label=False, interactive=False, show_download_button=False)
+                gr.Image("ui/en-long.png", width=IMG_W*2//3, show_label=False, interactive=False, show_download_button=False)
+
+            with gr.Row():
+                gr.Markdown("""<h1 style="text-align: center; font-size: 12px;"><b>➁ Drag the β˜€οΈŽ icon downward to capture a photo with a shorter exposure.</b></h1>""")
+                gr.Markdown("""<h1 style="text-align: center; font-size: 12px;"><b>βž€ Tap the center of the camera screen to reveal focus and exposure adjustment buttons β˜€οΈŽ.</b></h1>""")
+                gr.Markdown("""<h1 style="text-align: center; font-size: 12px;"><b>βž‚ Drag the β˜€οΈŽ icon upward to capture a photo with a longer exposure.</b></h1>""")
+
+            gr.Markdown("""<h1 style="text-align: center; font-size: 24px;"><b>Enjoy it!</b></h1>""")
+            with gr.Row():
+                under_expo_img = gr.Image(label="Short Exposure Image", show_label=True,
+                                          image_mode="RGB",
+                                          sources=["upload", ],
+                                          width=IMG_W,
+                                          height=IMG_H,
+                                          type="pil"
+                                          )
+                over_expo_img = gr.Image(label="Long Exposure Image", show_label=True,
+                                         image_mode="RGB",
+                                         sources=["upload", ],
+                                         width=IMG_W,
+                                         height=IMG_H,
+                                         type="pil"
+                                         )
+            with gr.Row():
+                run_button = gr.Button("Run", variant="primary") # scale=0,
 
-        result = gr.Image(label="Result", show_label=True,
-                          type='pil',
-                          image_mode='RGB',
-                          format="png",
-                          width=IMG_W*2,
-                          height=IMG_H*2,
+            result = gr.Image(label="Result", show_label=True,
+                              type='pil',
+                              image_mode='RGB',
+                              format="png",
+                              width=IMG_W*2,
+                              height=IMG_H*2,
                               )
-        with gr.Accordion("Advanced Settings", open=True):
-            num_inference_steps = gr.Slider(
-                label="Number of inference steps",
-                minimum=2,
-                maximum=50,
-                step=1,
-                value=20, # Replace with defaults that work for your model
-                interactive=True
-            )
-
-        gr.Examples(
-            examples=examples,
-            inputs=[under_expo_img, over_expo_img, num_inference_steps],
-            label="Examples",
-            # examples_per_page=10,
-            fn=infer,
-            cache_examples=True,
-            outputs=[result,],
-        )
-        # gr.Markdown(_CITE_)
-        run_button.click(fn=infer,
-                         inputs=[under_expo_img, over_expo_img, num_inference_steps],
-                         outputs=[result,],
-                         )
+            gr.Markdown(r"""<h1 style="text-align: center; font-size: 18px;"><b>Like it? Click the button πŸ“₯ on the image to download.</b></h1>""") # width="100" height="100" <img src="ui/download.svg" alt="download">
+            with gr.Accordion("Advanced Settings", open=True):
+                num_inference_steps = gr.Slider(
+                    label="Number of inference steps",
+                    minimum=2,
+                    maximum=50,
+                    step=1,
+                    value=20, # Replace with defaults that work for your model
+                    interactive=True
+                )
+
+            gr.Examples(
+                examples=examples,
+                inputs=[under_expo_img, over_expo_img, num_inference_steps],
+                label="Examples",
+                # examples_per_page=10,
+                fn=infer,
+                cache_examples=True,
+                outputs=[result,],
+            )
+            gr.Markdown(_README_)
+            # gr.Markdown(_CITE_)
+            run_button.click(fn=infer,
+                             inputs=[under_expo_img, over_expo_img, num_inference_steps],
+                             outputs=[result,],
+                             )
+    return demo
 
 if __name__ == "__main__":
+    demo = build_demo()
     demo.queue(max_size=10)
     demo.launch(share=True)
     # demo.launch(server_name="0.0.0.0", debug=True, show_api=True, show_error=True, share=False)
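The try/except block added to `infer` guards `num_inference_steps` against `None` or non-numeric values, which can arrive when the endpoint is hit through the API or a cached example rather than the slider. The same guard factored into a helper; the function name and the clamp to the slider's 2-50 range are illustrative additions here, not part of the commit:

```python
def coerce_steps(value, default: int = 20, lo: int = 2, hi: int = 50) -> int:
    """Coerce a slider/API value to an int step count: fall back to the
    default on None or junk, then clamp to the slider's allowed range."""
    try:
        steps = int(value) if value is not None else default
    except (TypeError, ValueError):
        steps = default
    return max(lo, min(hi, steps))

# Inside infer() this would replace the inline try/except:
# num_inference_steps = coerce_steps(num_inference_steps)
```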
 
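The new `RUN_TIMES` global is bumped on every call to `infer` purely for usage logging. With Gradio's queue feeding a single worker that is fine; if the Space ever serves requests from multiple threads, the bare read-modify-write can drop counts. A hedged sketch of a lock-protected equivalent (the names mirror the diff; the lock and helper are the additions):

```python
import datetime
import threading

# Protect the counter so concurrent calls cannot interleave the
# read-modify-write that a bare global like RUN_TIMES performs.
_run_times_lock = threading.Lock()
_run_times = 0

def bump_run_counter() -> int:
    """Atomically increment the usage counter and log it, as infer() does."""
    global _run_times
    with _run_times_lock:
        _run_times += 1
        count = _run_times
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print(f"{stamp}: Using times: {count}")
    return count
```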