Update app.py
app.py CHANGED
@@ -147,8 +147,8 @@ def infer():
     #img1_batch = torch.stack([frames[0]])
     #img2_batch = torch.stack([frames[1]])

-    img1_batch = torch.stack([
-    img2_batch = torch.stack([
+    img1_batch = torch.stack([input_frame_1])
+    img2_batch = torch.stack([input_frame_2])

     print(f"FRAME AFTER stack: {img1_batch}")

@@ -240,7 +240,7 @@ def infer():
     # convert the tensor to PIL image using above transform
     #img = transform(frames[1])
     img = transform(input_frame_2)
-    img = img.resize((
+    img = img.resize((520, 960))
     # display the PIL image
     #img.show()
     frame2pil = np.array(img.convert('RGB'))
@@ -248,6 +248,18 @@ def infer():
     print(f"frame1pil shape: {frame2pil.shape}")
     print(f"frame1pil dtype: {frame2pil.dtype}")
     img.save('raw_frame2.jpg')
+
+    # convert the tensor diffused to PIL image using above transform
+    #img = transform(frames[1])
+    img_diff = transform(input_diffused)
+    img_diff = img_diff.resize((520, 960))
+    # display the PIL image
+    #img.show()
+    diffpil = np.array(img_diff.convert('RGB'))
+    print(f"frame1pil: {diffpil}")
+    print(f"frame1pil shape: {diffpil.shape}")
+    print(f"frame1pil dtype: {diffpil.dtype}")
+    img_diff.save('diffused_resized.jpg')


     numpy_array_flow = predicted_flow.permute(1, 2, 0).detach().cpu().numpy()
@@ -264,7 +276,7 @@ def infer():
     numpy_array_flow*=1.
     # print('flow stats mul', flow.max(), flow.min(), flow.mean())
     # res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
-    res = cv2.remap(
+    res = cv2.remap(diffpil, numpy_array_flow, None, cv2.INTER_LANCZOS4)
     print(res)

     res = Image.fromarray(res)
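For context on the first hunk: the two stacked batches presumably feed the optical-flow model that produces the predicted_flow tensor used later in infer(). A minimal sketch of that step, assuming input_frame_1 and input_frame_2 are (C, H, W) float tensors and that the flow comes from torchvision's RAFT; the raft_large setup below is an assumption, since the model construction is not part of this diff:

    import torch
    from torchvision.models.optical_flow import raft_large, Raft_Large_Weights

    # assumption: a pretrained RAFT model supplies predicted_flow elsewhere in app.py
    weights = Raft_Large_Weights.DEFAULT
    model = raft_large(weights=weights).eval()
    transforms = weights.transforms()

    img1_batch = torch.stack([input_frame_1])   # shape (1, 3, H, W)
    img2_batch = torch.stack([input_frame_2])

    # the weights' transforms rescale/normalize both batches to the range RAFT expects
    img1_batch, img2_batch = transforms(img1_batch, img2_batch)

    with torch.no_grad():
        # RAFT returns a list of progressively refined flow estimates; take the last one
        predicted_flow = model(img1_batch, img2_batch)[-1][0]   # (2, H, W): per-pixel (dx, dy)

Note that RAFT expects the spatial dimensions of both frames to be divisible by 8.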
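On the last hunk: cv2.remap interprets its map argument as absolute sampling coordinates rather than relative displacements, so a raw optical-flow field is normally added to a pixel-coordinate grid before remapping. A minimal sketch of that conventional warp, under the assumption that numpy_array_flow is an (H, W, 2) array of per-pixel (dx, dy) offsets matching the size of diffpil; warp_with_flow is an illustrative helper, not something defined in app.py:

    import cv2
    import numpy as np

    def warp_with_flow(image, flow):
        # image: (H, W, 3) uint8; flow: (H, W, 2) per-pixel (dx, dy) displacements
        h, w = flow.shape[:2]
        grid_x, grid_y = np.meshgrid(np.arange(w), np.arange(h))
        # convert the relative flow into the absolute (x, y) lookup maps remap expects
        map_x = (grid_x + flow[..., 0]).astype(np.float32)
        map_y = (grid_y + flow[..., 1]).astype(np.float32)
        return cv2.remap(image, map_x, map_y, cv2.INTER_LANCZOS4)

    # e.g. res = warp_with_flow(diffpil, numpy_array_flow)

Both maps must be float32 (or a single two-channel float32 map with map2=None), and the flow resolution has to match the image being warped.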