hillol7 committed on
Commit
6e84592
·
verified ·
1 Parent(s): 6ecf689

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -0
app.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import tempfile

import cv2
import numpy as np
import streamlit as st

st.title("Object Tracking in Video using SIFT")

# Upload files
uploaded_image = st.file_uploader("Upload an image", type=['png', 'jpg'])
uploaded_video = st.file_uploader("Upload a video", type=['mp4'])

if uploaded_image and uploaded_video:
    # Decode the uploaded image directly from memory as grayscale
    # (SIFT operates on single-channel images).
    image_bytes = np.asarray(bytearray(uploaded_image.read()), dtype=np.uint8)
    input_image = cv2.imdecode(image_bytes, cv2.IMREAD_GRAYSCALE)

    # BUG FIX: the original called cap.open(uploaded_video.name), but that is
    # only the client-side filename — no such file exists on the server, so the
    # capture never opened (and the bytes it read were discarded). Persist the
    # uploaded stream to a temp file so OpenCV can open it.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmp:
        tmp.write(uploaded_video.read())
        video_path = tmp.name

    cap = cv2.VideoCapture(video_path)

    # Initialize SIFT detector and a brute-force descriptor matcher.
    sift = cv2.SIFT_create()
    bf = cv2.BFMatcher()

    # Detect keypoints and descriptors for the reference image.
    keypoints_input, descriptors_input = sift.detectAndCompute(input_image, None)

    occurrences = 0            # number of distinct appearances of the object
    occurrence_start = 0.0     # start time (s) of the current appearance
    occurrence_duration = 0.0  # duration (s) of the current appearance
    prev_matches = []          # non-empty while the object is currently visible

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                st.write("End of video reached.")
                break

            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # Detect keypoints and descriptors for the current video frame.
            keypoints_frame, descriptors_frame = sift.detectAndCompute(frame_gray, None)

            # BUG FIX: a featureless frame (or image) yields descriptors == None,
            # which would crash knnMatch; skip matching in that case.
            good_matches = []
            if descriptors_input is not None and descriptors_frame is not None:
                matches = bf.knnMatch(descriptors_input, descriptors_frame, k=2)
                # Lowe's ratio test. BUG FIX: knnMatch may return fewer than 2
                # neighbours per query, so unpacking `m, n` could raise — check
                # the pair length before applying the ratio.
                for pair in matches:
                    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
                        good_matches.append(pair[0])

            if len(good_matches) >= 6:
                if not prev_matches:
                    # Object just (re)appeared: open a new occurrence window.
                    occurrence_start = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000
                    occurrences += 1

                prev_matches = good_matches
                occurrence_duration = (cap.get(cv2.CAP_PROP_POS_MSEC) / 1000) - occurrence_start
            else:
                if prev_matches:
                    # Object disappeared: report the occurrence that just ended.
                    st.write(f"Occurrence {occurrences}: Start time: {occurrence_start:.2f}s, Duration: {occurrence_duration:.2f}s")
                    prev_matches = []

        # BUG FIX: if the object was still visible when the video ended, the
        # original silently dropped the last occurrence — flush it here.
        if prev_matches:
            st.write(f"Occurrence {occurrences}: Start time: {occurrence_start:.2f}s, Duration: {occurrence_duration:.2f}s")
    finally:
        # Always release the capture and remove the temp file, even on error.
        cap.release()
        os.unlink(video_path)