Vishnu Anilkumar committed
Commit 42e4237 · 1 Parent(s): 504bc81

initial code change

Files changed (5)
  1. app.py +73 -0
  2. kn_church-2.jpg +0 -0
  3. kn_church-8.jpg +0 -0
  4. packages.txt +3 -0
  5. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,73 @@
+ import matplotlib.pyplot as plt
+ import cv2
+ import kornia as K
+ import kornia.feature as KF
+ import numpy as np
+ import torch
+ import gradio as gr
+ from kornia_moons.viz import draw_LAF_matches
+
+
+ def load_torch_image(fname):
+     img: torch.Tensor = K.io.load_image(fname, K.io.ImageLoadType.RGB32)
+     img = img[None]  # 1xCxHxW / fp32 / [0, 1]
+     img = K.geometry.resize(img, (700, 700))
+     return img
+
+
+ def inference(file1, file2):
+     img1 = load_torch_image(file1)
+     img2 = load_torch_image(file2)
+
+     matcher = KF.LoFTR(pretrained='outdoor')
+
+     input_dict = {"image0": K.color.rgb_to_grayscale(img1),  # LoFTR works on grayscale images only
+                   "image1": K.color.rgb_to_grayscale(img2)}
+
+     # Run LoFTR to get tentative correspondences
+     with torch.no_grad():
+         correspondences = matcher(input_dict)
+     mkpts0 = correspondences['keypoints0'].cpu().numpy()
+     mkpts1 = correspondences['keypoints1'].cpu().numpy()
+     # Verify matches with MAGSAC++ (findFundamentalMat estimates the fundamental matrix, not a homography)
+     Fm, inliers = cv2.findFundamentalMat(mkpts0, mkpts1, cv2.USAC_MAGSAC, 0.5, 0.999, 100000)
+     inliers = inliers > 0
+     fig, ax = plt.subplots()
+
+     # Wrap the bare keypoints as local affine frames (LAFs) for visualization
+     draw_LAF_matches(
+         KF.laf_from_center_scale_ori(torch.from_numpy(mkpts0).view(1, -1, 2),
+                                      torch.ones(mkpts0.shape[0]).view(1, -1, 1, 1),
+                                      torch.ones(mkpts0.shape[0]).view(1, -1, 1)),
+         KF.laf_from_center_scale_ori(torch.from_numpy(mkpts1).view(1, -1, 2),
+                                      torch.ones(mkpts1.shape[0]).view(1, -1, 1, 1),
+                                      torch.ones(mkpts1.shape[0]).view(1, -1, 1)),
+         torch.arange(mkpts0.shape[0]).view(-1, 1).repeat(1, 2),
+         K.tensor_to_image(img1),
+         K.tensor_to_image(img2),
+         inliers,
+         draw_dict={'inlier_color': (0.2, 1, 0.2),
+                    'tentative_color': None,
+                    'feature_color': (0.2, 0.5, 1), 'vertical': False},
+         ax=ax)
+     plt.axis('off')
+     fig.savefig('example.jpg', dpi=110, bbox_inches='tight')
+     return 'example.jpg'
+
+
+ title = "Kornia-LoFTR"
+ description = "Gradio demo for Kornia-LoFTR: Detector-Free Local Feature Matching with Transformers. To use it, upload two images of the same scene, or click the example pair to load it. Read more at the links below."
+ article = "<p style='text-align: center'><a href='https://kornia.readthedocs.io/en/latest/' target='_blank'>Open Source Differentiable Computer Vision Library</a> | <a href='https://github.com/kornia/kornia' target='_blank'>Kornia Github Repo</a> | <a href='https://github.com/zju3dv/LoFTR' target='_blank'>LoFTR Github</a> | <a href='https://arxiv.org/abs/2104.00680' target='_blank'>LoFTR: Detector-Free Local Feature Matching with Transformers</a></p>"
+ css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
+
+ examples = [['kn_church-2.jpg', 'kn_church-8.jpg']]
+ gr.Interface(
+     inference,
+     [gr.Image(type="filepath", label="Input1"), gr.Image(type="filepath", label="Input2")],
+     gr.Image(type="filepath", label="Output"),
+     title=title,
+     description=description,
+     article=article,
+     examples=examples,
+     css=css
+ ).launch(debug=True)
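For a quick headless check of the matching step (importing app.py would launch the Gradio demo, so the preprocessing is duplicated here), a minimal sketch using the example pair committed below:

import torch
import kornia as K
import kornia.feature as KF

def load(fname):
    # Same preprocessing as app.py: 1xCxHxW float tensor in [0, 1], resized to 700x700
    img = K.io.load_image(fname, K.io.ImageLoadType.RGB32)[None]
    return K.geometry.resize(img, (700, 700))

matcher = KF.LoFTR(pretrained='outdoor')  # downloads pretrained weights on first use
with torch.no_grad():
    out = matcher({"image0": K.color.rgb_to_grayscale(load('kn_church-2.jpg')),
                   "image1": K.color.rgb_to_grayscale(load('kn_church-8.jpg'))})
print(out['keypoints0'].shape[0], "tentative matches")  # keypoints0/1 are Nx2 tensors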
kn_church-2.jpg ADDED
kn_church-8.jpg ADDED
packages.txt ADDED
@@ -0,0 +1,3 @@
+ ffmpeg
+ libsm6
+ libxext6
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ matplotlib
+ kornia
+ kornia_rs
+ numpy
+ torch
+ kornia_moons
+ opencv-python
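After "pip install -r requirements.txt" (versions are left unpinned in this commit), the set can be sanity-checked with a short import loop; note that opencv-python imports as cv2:

import importlib

# Each requirements.txt entry under the module name it is imported as
for mod in ("matplotlib", "kornia", "kornia_rs", "numpy",
            "torch", "kornia_moons", "cv2"):
    importlib.import_module(mod)  # raises ImportError if anything is missing
print("all imports OK")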