ayush200399391001 committed
Commit 5b3f437 · verified · 1 parent: 4970cf1

Update app.py

Files changed (1): app.py +386 -386
app.py CHANGED
@@ -1,387 +1,387 @@
import cv2
import os
import json
import subprocess
import tempfile
import numpy as np
import torch
import matplotlib.pyplot as plt  # Retained for the visualization code elided in generate_report()
from tqdm import tqdm
from PIL import Image
from transformers import (
    AutoImageProcessor,
    AutoModelForObjectDetection
)

# -------------------- Configuration -------------------- #
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
FRAME_EXTRACTION_INTERVAL = 0.01  # Seconds between frame captures

# -------------------- Model Loading -------------------- #
try:
    print("🔄 Loading visual model and processor...")
    processor_visual = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50")
    model_visual = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50").to(DEVICE)
    print(f"✅ Model loaded on {DEVICE} successfully!")
except Exception as e:
    print(f"❌ Error loading model: {e}")
    exit()

# -------------------- Metadata Extraction -------------------- #
def extract_metadata(video_path):
    """Extracts video metadata using FFmpeg's ffprobe"""
    try:
        cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
               "-show_format", "-show_streams", video_path]
        result = subprocess.run(cmd, capture_output=True, text=True)
        return json.loads(result.stdout)
    except Exception as e:
        print(f"❌ Metadata extraction failed: {e}")
        return {}
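# With -show_format and -show_streams, ffprobe returns JSON shaped like
# {"format": {...}, "streams": [{...}, ...]}; analyze_video() below prints
# streams[0], typically the video stream's properties.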
# -------------------- Frame Extraction -------------------- #
def extract_frames(video_path, output_folder="frames"):
    """Extracts frames from video at the configured interval (supports sub-second intervals)"""
    os.makedirs(output_folder, exist_ok=True)

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print("❌ Could not open video file")
        return 0

    fps = cap.get(cv2.CAP_PROP_FPS)
    if fps <= 0:
        print("❌ Could not determine video FPS")
        cap.release()
        return 0
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # Total frames in the video
    total_duration = total_frames / fps  # Total duration in seconds
    frame_count = 0

    # A while loop over timestamps allows sub-second (even sub-frame) steps
    timestamp = 0.0
    while timestamp <= total_duration:
        cap.set(cv2.CAP_PROP_POS_MSEC, timestamp * 1000)  # Seek position in milliseconds
        ret, frame = cap.read()
        if ret:
            cv2.imwrite(f"{output_folder}/frame_{frame_count:04d}.jpg", frame)
            frame_count += 1
        else:
            break  # Stop if we can't read any more frames

        timestamp += FRAME_EXTRACTION_INTERVAL  # Advance by the capture interval

    cap.release()
    return frame_count
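# Note: FRAME_EXTRACTION_INTERVAL = 0.01 s steps the seek position faster than
# one frame period for typical videos (~0.033 s at 30 fps), so several
# consecutive timestamps can decode to the same frame and yield duplicate JPEGs.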
# -------------------- Optical Flow Calculation -------------------- #
def calculate_optical_flow(frames_folder):
    """Calculates dense optical flow between consecutive frames with validation"""
    frame_files = sorted([f for f in os.listdir(frames_folder) if f.endswith(".jpg")])
    flow_results = []

    # Get reference dimensions from the first valid frame
    ref_height, ref_width = None, None
    for f in frame_files:
        frame = cv2.imread(os.path.join(frames_folder, f))
        if frame is not None:
            ref_height, ref_width = frame.shape[:2]
            break

    if ref_height is None:
        print("⚠ No valid frames found for optical flow calculation")
        return []

    prev_gray = None
    for i in tqdm(range(len(frame_files)), desc="Calculating optical flow"):
        current_path = os.path.join(frames_folder, frame_files[i])
        current_frame = cv2.imread(current_path)

        if current_frame is None:
            continue

        # Ensure consistent dimensions
        if current_frame.shape[:2] != (ref_height, ref_width):
            current_frame = cv2.resize(current_frame, (ref_width, ref_height))

        # Ensure 3-channel color format
        if len(current_frame.shape) == 2:
            current_frame = cv2.cvtColor(current_frame, cv2.COLOR_GRAY2BGR)

        current_gray = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)

        if prev_gray is not None:
            flow = cv2.calcOpticalFlowFarneback(
                prev_gray, current_gray, None,
                pyr_scale=0.5, levels=3, iterations=3,
                winsize=15, poly_n=5, poly_sigma=1.2, flags=0
            )

            # Per-pixel flow magnitude: sqrt(dx**2 + dy**2)
            flow_magnitude = np.sqrt(flow[..., 0]**2 + flow[..., 1]**2)
            flow_results.append({
                "max_flow": float(flow_magnitude.max()),
                "mean_flow": float(flow_magnitude.mean())
            })

        prev_gray = current_gray

    # Apply temporal smoothing: centered moving average over window_size entries
    # (index i averages entries max(0, i-2) .. min(n-1, i+2))
    window_size = 5
    smoothed_flow = []
    for i in range(len(flow_results)):
        start = max(0, i - window_size // 2)
        end = min(len(flow_results), i + window_size // 2 + 1)
        window = flow_results[start:end]
        avg_mean = np.mean([f['mean_flow'] for f in window])
        avg_max = np.mean([f['max_flow'] for f in window])
        smoothed_flow.append({'mean_flow': avg_mean, 'max_flow': avg_max})

    return smoothed_flow
# -------------------- Visual Analysis -------------------- #
def detect_objects(frames_folder):
    """Processes frames through the visual detection model"""
    results = []
    frame_files = sorted([f for f in os.listdir(frames_folder) if f.endswith(".jpg")])

    for frame_file in tqdm(frame_files, desc="Analyzing frames"):
        try:
            image = Image.open(os.path.join(frames_folder, frame_file))
            inputs = processor_visual(images=image, return_tensors="pt").to(DEVICE)

            with torch.no_grad():
                outputs = model_visual(**inputs)

            # Post-process detections; target_sizes expects (height, width)
            target_sizes = torch.tensor([image.size[::-1]]).to(DEVICE)
            detections = processor_visual.post_process_object_detection(
                outputs, target_sizes=target_sizes, threshold=0.4  # Lowered from 0.7
            )[0]

            scores = detections["scores"].cpu().numpy().tolist()
            max_confidence = max(scores) if scores else 0.0

            results.append({
                "frame": frame_file,
                "detections": len(scores),
                "max_confidence": max_confidence,
                "average_confidence": np.mean(scores) if scores else 0.0
            })

        except Exception as e:
            print(f"⚠ Error processing {frame_file}: {e}")
            results.append({
                "frame": frame_file,
                "detections": 0,
                "max_confidence": 0.0,
                "average_confidence": 0.0
            })

    return results

# -------------------- Manipulation Detection -------------------- #
def detect_manipulation(report_path="report.json"):
    """Determines video authenticity based on analysis results"""
    try:
        with open(report_path) as f:
            report = json.load(f)

        # Adjusted thresholds
        CONFIDENCE_THRESHOLD = 0.80   # Raised from 0.65
        FLOW_STD_THRESHOLD = 28       # New standard-deviation threshold
        SUSPICIOUS_FRAME_RATIO = 0.3  # Increased from 0.25

        stats = report["summary_stats"]

        # Derived metrics
        confidence_std = np.std([r["average_confidence"] for r in report["frame_analysis"]])
        flow_std = stats.get("std_optical_flow", 0)
        low_conf_frames = sum(1 for r in report["frame_analysis"] if r["average_confidence"] < 0.4)
        anomaly_ratio = low_conf_frames / len(report["frame_analysis"])

        # Multi-factor scoring
        score = 0
        if stats["average_detection_confidence"] < CONFIDENCE_THRESHOLD:
            score += 1.5
        if flow_std > FLOW_STD_THRESHOLD:
            score += 1.2
        if anomaly_ratio > SUSPICIOUS_FRAME_RATIO:
            score += 1.0
        if confidence_std > 0.2:  # High variance in confidence
            score += 0.8

        return score

    except Exception as e:
        return f"❌ Error in analysis: {str(e)}"
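# The factor weights sum to 4.5, so detect_manipulation() returns a score in
# [0, 4.5]; the Streamlit UI below flags >= 3.5 as major and >= 2.0 as minor
# signs of manipulation.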
# -------------------- Reporting -------------------- #
def generate_report(visual_results, flow_results, output_file="report.json"):
    """Generates comprehensive analysis report"""
    report_data = {
        "frame_analysis": visual_results,
        "motion_analysis": flow_results,
        "summary_stats": {
            "max_detection_confidence": max(r["max_confidence"] for r in visual_results),
            "average_detection_confidence": np.mean([r["average_confidence"] for r in visual_results]),
            "detection_confidence_std": np.std([r["average_confidence"] for r in visual_results]),
            "peak_optical_flow": max(r["max_flow"] for r in flow_results) if flow_results else 0,
            "average_optical_flow": np.mean([r["mean_flow"] for r in flow_results]) if flow_results else 0,
            "std_optical_flow": np.std([r["mean_flow"] for r in flow_results]) if flow_results else 0
        }
    }

    with open(output_file, "w") as f:
        json.dump(report_data, f, indent=2)

    # ... rest of visualization code ...

    return report_data
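# Shape of report.json as written above:
# {
#   "frame_analysis":  [{"frame", "detections", "max_confidence", "average_confidence"}, ...],
#   "motion_analysis": [{"max_flow", "mean_flow"}, ...],
#   "summary_stats":   {"max_detection_confidence", "average_detection_confidence",
#                       "detection_confidence_std", "peak_optical_flow",
#                       "average_optical_flow", "std_optical_flow"}
# }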
# -------------------- Main Pipeline -------------------- #
def analyze_video(video_path):
    """Complete video analysis workflow"""
    print("\n📋 Metadata Extraction:")
    metadata = extract_metadata(video_path)
    print(json.dumps(metadata.get("streams", [{}])[0], indent=2))

    print("\n🎞 Frame Extraction:")
    frame_count = extract_frames(video_path)
    print(f"✅ Extracted {frame_count} frames at {FRAME_EXTRACTION_INTERVAL}s intervals")

    print("\n🔍 Running object detection...")
    visual_results = detect_objects("frames")

    print("\n🌀 Calculating optical flow...")
    flow_results = calculate_optical_flow("frames")

    print("\n📊 Generating Final Report...")
    report_data = generate_report(visual_results, flow_results)

    print("\n🔍 Authenticity Analysis:")
    score = detect_manipulation()  # Reads the report.json written just above

    print(f"\n🎯 Final Score: {score}")
    return score


# -------------------- Execution -------------------- #
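# Minimal CLI entry point (a sketch; "sample.mp4" is a placeholder path). Left
# commented out because `streamlit run app.py` executes this module as
# __main__, so enabling it would run a full analysis at app startup:
# if __name__ == "__main__":
#     print(f"Final score: {analyze_video('sample.mp4')}")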
# -------------------------------- Streamlit -------------------------------- #
import streamlit as st

def local_css(file_name):
    """Injects a local stylesheet into the page"""
    with open(file_name) as f:
        st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)

local_css("style.css")  # Requires a separate style.css file alongside app.py

# Sidebar navigation
st.sidebar.title("Navigation")
page = st.sidebar.radio("", ["Home", "Analyze Video", "About"])

# Home Page
if page == "Home":
    st.markdown("<h1 class='title'>Video Manipulation Detection</h1>", unsafe_allow_html=True)

    # Hero Section
    col1, col2 = st.columns(2)
    with col1:
        st.markdown("""
        <div class='hero-text'>
        Detect manipulated videos with AI-powered analysis.
        Protect yourself from deepfakes and synthetic media.
        </div>
        """, unsafe_allow_html=True)

    with col2:
        st.video("Realistic Universe Intro_free.mp4")  # Bundled sample video

    # Features Section
    st.markdown("## How It Works")
    cols = st.columns(3)
    with cols[0]:
        st.image("upload-icon.png", width=100)
        st.markdown("### Upload Video")
    with cols[1]:
        st.image("analyze-icon.png", width=100)
        st.markdown("### AI Analysis")
    with cols[2]:
        st.image("result-icon.png", width=100)
        st.markdown("### Get Results")

elif page == "Analyze Video":
    uploaded_file = st.file_uploader("Upload a Video", type=["mp4", "mov"])

    if uploaded_file is not None:
        # Save uploaded file to a temporary location
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file:
            temp_file.write(uploaded_file.read())
            temp_video_path = temp_file.name

        st.video(temp_video_path)

        if st.button("Analyze Video"):
            with st.spinner("Analyzing..."):
                try:
                    score = analyze_video(temp_video_path)

                    st.write(f"Analysis Score: {score}")
                    # Raises ValueError if detect_manipulation() returned an
                    # error string; the except block below reports it
                    score = float(score)

                    # Display result based on score
                    if score >= 3.5:
                        st.markdown("""
                        <div class='result-box suspicious'>
                        <p>This video shows major signs of manipulation</p>
                        </div>
                        """, unsafe_allow_html=True)
                    elif score >= 2.0:
                        st.markdown("""
                        <div class='result-box suspicious'>
                        <p>This video shows minor signs of manipulation</p>
                        </div>
                        """, unsafe_allow_html=True)
                    else:
                        st.markdown("""
                        <div class='result-box clean'>
                        <p>No significant manipulation detected</p>
                        </div>
                        """, unsafe_allow_html=True)
                except Exception as e:
                    st.error(f"An error occurred during analysis: {e}")

elif page == "About":
    st.markdown("<h1 class='title'>About Us</h1>", unsafe_allow_html=True)

    # Creator Profile
    col1, col2 = st.columns(2)
    with col1:
        st.image("creator.jpg", width=300, caption="Ayush Agarwal, Lead Developer")
    with col2:
        st.markdown("""
        <div class='about-text'>
        ## Ayush Agarwal
        Student at VIT Bhopal University,
        AIML enthusiast
        <br><br>
        📧 [email protected]
        <br>
        🔗 [LinkedIn](https://www.linkedin.com/in/ayush20039939)
        <br>
        🐙 [GitHub](https://github.com)
        </div>
        """, unsafe_allow_html=True)

    # Technology Stack
    st.markdown("## Our Technology")
    st.markdown("""
    <div class='tech-stack'>
        <img src='https://img.icons8.com/color/96/000000/python.png'/>
        <img src='https://img.icons8.com/color/96/000000/tensorflow.png'/>
        <img src='https://img.icons8.com/color/96/000000/opencv.png'/>
        <img src='https://raw.githubusercontent.com/github/explore/968d1eb8fb6b704c6be917f0000283face4f33ee/topics/streamlit/streamlit.png'/>
    </div>
    """, unsafe_allow_html=True)
 