hillol7 committed on
Commit
4a1665d
·
verified ·
1 Parent(s): 62be976

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +63 -61
app.py CHANGED
@@ -1,64 +1,66 @@
1
import streamlit as st
import cv2
import numpy as np
import tempfile

st.title("Object Tracking in Video using SIFT")

# Upload files
uploaded_image = st.file_uploader("Upload an image", type=['png', 'jpg'])
uploaded_video = st.file_uploader("Upload a video", type=['mp4'])

if uploaded_image and uploaded_video:
    # Decode the uploaded image bytes into an OpenCV grayscale array.
    image_bytes = np.asarray(bytearray(uploaded_image.read()), dtype=np.uint8)
    input_image = cv2.imdecode(image_bytes, cv2.IMREAD_GRAYSCALE)

    # cv2.VideoCapture cannot read an in-memory upload (uploaded_video.name
    # is only the original filename, not a path on disk) — persist the bytes
    # to a temporary file and open that path instead.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
        tmp.write(uploaded_video.read())
        video_path = tmp.name
    cap = cv2.VideoCapture(video_path)

    # Initialize SIFT detector and brute-force matcher.
    sift = cv2.SIFT_create()
    bf = cv2.BFMatcher()

    # Keypoints and descriptors for the reference image.
    keypoints_input, descriptors_input = sift.detectAndCompute(input_image, None)

    occurrences = 0          # number of distinct match runs seen so far
    occurrence_start = 0     # start time (s) of the current run
    occurrence_duration = 0  # duration (s) of the current run
    prev_matches = []        # non-empty while a run is active

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            st.write("End of video reached.")
            break

        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Keypoints and descriptors for the current video frame.
        keypoints_frame, descriptors_frame = sift.detectAndCompute(frame_gray, None)

        # knnMatch with k=2 needs descriptors on both sides and at least two
        # candidates in the frame; otherwise treat the frame as match-free.
        if descriptors_input is None or descriptors_frame is None or len(descriptors_frame) < 2:
            matches = []
        else:
            matches = bf.knnMatch(descriptors_input, descriptors_frame, k=2)

        # Lowe's ratio test; guard against truncated match pairs.
        good_matches = []
        for pair in matches:
            if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
                good_matches.append(pair[0])

        if len(good_matches) >= 6:
            if not prev_matches:
                occurrence_start = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000
                occurrences += 1

            prev_matches = good_matches
            occurrence_duration = (cap.get(cv2.CAP_PROP_POS_MSEC) / 1000) - occurrence_start
        else:
            if prev_matches:
                st.write(f"Occurrence {occurrences}: Start time: {occurrence_start:.2f}s, Duration: {occurrence_duration:.2f}s")
                prev_matches = []

    # Report an occurrence that was still active when the video ended.
    if prev_matches:
        st.write(f"Occurrence {occurrences}: Start time: {occurrence_start:.2f}s, Duration: {occurrence_duration:.2f}s")

    cap.release()
64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import tempfile

import cv2
import numpy as np
import streamlit as st
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
def main():
    """Streamlit app: locate occurrences of an uploaded image inside an
    uploaded video using SIFT keypoint matching, reporting the start time
    and duration of each match run."""
    st.title("SIFT Object Tracking in Video")
    st.sidebar.title("Upload Files")

    uploaded_image = st.sidebar.file_uploader("Upload an image (PNG or JPG)", type=["png", "jpg"])
    uploaded_video = st.sidebar.file_uploader("Upload a video (MP4)", type=["mp4"])

    if uploaded_image and uploaded_video:
        st.sidebar.success("Files successfully uploaded!")
        # np.fromstring is removed in modern NumPy; frombuffer is the
        # supported zero-copy replacement for decoding raw bytes.
        image = cv2.imdecode(np.frombuffer(uploaded_image.read(), np.uint8), cv2.IMREAD_COLOR)
        # cv2.VideoCapture cannot consume a Streamlit UploadedFile object;
        # persist the bytes to a temporary file and open that path.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
            tmp.write(uploaded_video.read())
            video_path = tmp.name
        video = cv2.VideoCapture(video_path)

        st.header("Uploaded Image")
        # OpenCV decodes to BGR; tell Streamlit so colors render correctly.
        st.image(image, caption="Uploaded Image", use_column_width=True, channels="BGR")

        st.header("Uploaded Video")
        st.video(uploaded_video)

        # Perform object tracking with SIFT
        sift = cv2.SIFT_create()
        keypoints_input, descriptors_input = sift.detectAndCompute(image, None)
        bf = cv2.BFMatcher()

        occurrences = 0          # number of distinct match runs seen so far
        occurrence_start = 0     # start time (s) of the current run
        occurrence_duration = 0  # duration (s) of the current run
        prev_matches = []        # non-empty while a run is active

        while video.isOpened():
            ret, frame = video.read()
            if not ret:
                st.write("End of video reached.")
                break

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            keypoints_frame, descriptors_frame = sift.detectAndCompute(frame_gray, None)

            # knnMatch with k=2 needs descriptors on both sides and at least
            # two candidates in the frame; otherwise skip matching.
            if descriptors_input is None or descriptors_frame is None or len(descriptors_frame) < 2:
                matches = []
            else:
                matches = bf.knnMatch(descriptors_input, descriptors_frame, k=2)

            # Lowe's ratio test; guard against truncated match pairs.
            good_matches = []
            for pair in matches:
                if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
                    good_matches.append(pair[0])

            if len(good_matches) >= 6:
                if not prev_matches:
                    occurrence_start = video.get(cv2.CAP_PROP_POS_MSEC) / 1000
                    occurrences += 1

                prev_matches = good_matches
                occurrence_duration = (video.get(cv2.CAP_PROP_POS_MSEC) / 1000) - occurrence_start
            else:
                if prev_matches:
                    st.write(f"Occurrence {occurrences}: Start time: {occurrence_start:.2f}s, Duration: {occurrence_duration:.2f}s")
                    prev_matches = []

        # Report an occurrence that was still active when the video ended.
        if prev_matches:
            st.write(f"Occurrence {occurrences}: Start time: {occurrence_start:.2f}s, Duration: {occurrence_duration:.2f}s")

        video.release()

    else:
        st.sidebar.warning("Please upload both an image and a video.")

if __name__ == "__main__":
    main()