ayush200399391001 committed on
Commit 5883ac3 · verified
1 Parent(s): 7f3dc47

Update app.py

Files changed (1)
  1. app.py +61 -131
app.py CHANGED
@@ -11,9 +11,8 @@ from transformers import (
     AutoImageProcessor,
     AutoModelForObjectDetection
 )
-import tempfile
 import streamlit as st
-from huggingface_hub import hf_hub_download
+import tempfile
 
 # -------------------- Configuration -------------------- #
 DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -21,13 +20,13 @@ FRAME_EXTRACTION_INTERVAL = 0.01 # Seconds between frame captures
 
 # -------------------- Model Loading -------------------- #
 try:
-    print("🔄 Loading visual model and processor...")
+    st.write("🔄 Loading visual model and processor...")
     processor_visual = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50")
     model_visual = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50").to(DEVICE)
-    print(f"✅ Model loaded on {DEVICE} successfully!")
+    st.write(f"✅ Model loaded on {DEVICE} successfully!")
 except Exception as e:
-    print(f"❌ Error loading model: {e}")
-    exit()
+    st.error(f"❌ Error loading model: {e}")
+    st.stop()
 
 # -------------------- Metadata Extraction -------------------- #
 def extract_metadata(video_path):
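Note: `app.py` is re-executed on every Streamlit interaction, so the module-level load above runs on each rerun. A minimal sketch of the same step behind `st.cache_resource` (not part of this commit; `load_detr` is a hypothetical helper):

```python
# Sketch only: cache the DETR processor/model across Streamlit reruns.
import streamlit as st
import torch
from transformers import AutoImageProcessor, AutoModelForObjectDetection

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

@st.cache_resource  # loaded once per process, reused on reruns
def load_detr():
    processor = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50")
    model = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50").to(DEVICE)
    return processor, model

try:
    processor_visual, model_visual = load_detr()
    st.write(f"✅ Model loaded on {DEVICE} successfully!")
except Exception as e:
    st.error(f"❌ Error loading model: {e}")
    st.stop()
```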
@@ -38,7 +37,7 @@ def extract_metadata(video_path):
         result = subprocess.run(cmd, capture_output=True, text=True)
         return json.loads(result.stdout)
     except Exception as e:
-        print(f"❌ Metadata extraction failed: {e}")
+        st.error(f"❌ Metadata extraction failed: {e}")
         return {}
 
 # -------------------- Frame Extraction -------------------- #
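The `cmd` built inside `extract_metadata` sits outside this hunk; assuming it is a standard ffprobe JSON probe, the function is roughly:

```python
# Assumed shape of the elided ffprobe call (the commit only changes the error path).
import json
import subprocess

def extract_metadata(video_path):
    cmd = [
        "ffprobe", "-v", "quiet",
        "-print_format", "json",       # machine-readable output
        "-show_format", "-show_streams",
        video_path,
    ]
    result = subprocess.run(cmd, capture_output=True, text=True)
    return json.loads(result.stdout)   # {"format": {...}, "streams": [...]}
```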
@@ -48,7 +47,7 @@ def extract_frames(video_path, output_folder="frames"):
 
     cap = cv2.VideoCapture(video_path)
     if not cap.isOpened():
-        print("❌ Could not open video file")
+        st.error("❌ Could not open video file")
         return 0
 
     fps = cap.get(cv2.CAP_PROP_FPS)
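Only the error branch of `extract_frames` is changed here; for context, a minimal sketch of interval-based extraction consistent with the signature and `FRAME_EXTRACTION_INTERVAL` above (an assumed implementation, not the committed one):

```python
# Sketch: save one frame every FRAME_EXTRACTION_INTERVAL seconds with OpenCV.
import os
import cv2

FRAME_EXTRACTION_INTERVAL = 0.01  # seconds between captures (see configuration hunk)

def extract_frames(video_path, output_folder="frames"):
    os.makedirs(output_folder, exist_ok=True)
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return 0

    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0                     # fall back if FPS is unreported
    step = max(1, int(round(fps * FRAME_EXTRACTION_INTERVAL)))  # frames between captures

    saved = index = 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if index % step == 0:
            cv2.imwrite(os.path.join(output_folder, f"frame_{saved:05d}.jpg"), frame)
            saved += 1
        index += 1
    cap.release()
    return saved
```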
@@ -87,7 +86,7 @@ def calculate_optical_flow(frames_folder):
             break
 
     if ref_height is None:
-        print("⚠ No valid frames found for optical flow calculation")
+        st.warning("⚠ No valid frames found for optical flow calculation")
         return []
 
     prev_gray = None
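Apart from this warning, `calculate_optical_flow` is outside the diff; a sketch of the usual dense-flow approach (Farnebäck flow is an assumption here, the project may use a different method):

```python
# Sketch: mean dense optical-flow magnitude between consecutive grayscale frames.
import os
import cv2
import numpy as np

def mean_flow_magnitudes(frames_folder="frames"):
    magnitudes, prev_gray = [], None
    for name in sorted(os.listdir(frames_folder)):
        img = cv2.imread(os.path.join(frames_folder, name))
        if img is None:
            continue                                   # skip unreadable frames
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if prev_gray is not None and prev_gray.shape == gray.shape:
            flow = cv2.calcOpticalFlowFarneback(prev_gray, gray, None,
                                                0.5, 3, 15, 3, 5, 1.2, 0)
            mag, _ = cv2.cartToPolar(flow[..., 0], flow[..., 1])
            magnitudes.append(float(np.mean(mag)))     # average motion per frame pair
        prev_gray = gray
    return magnitudes
```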
@@ -167,7 +166,7 @@ def detect_objects(frames_folder):
             })
 
         except Exception as e:
-            print(f"⚠ Error processing {frame_file}: {e}")
+            st.warning(f"⚠ Error processing {frame_file}: {e}")
             results.append({
                 "frame": frame_file,
                 "detections": 0,
@@ -232,142 +231,73 @@ def generate_report(visual_results, flow_results, output_file="report.json"):
     with open(output_file, "w") as f:
         json.dump(report_data, f, indent=2)
 
-    return report_data # Added return statement
+    return report_data
 
 # -------------------- Main Pipeline -------------------- #
 def analyze_video(video_path):
     """Complete video analysis workflow"""
-    print("\n📋 Metadata Extraction:")
+    st.write("\n📋 Metadata Extraction:")
     metadata = extract_metadata(video_path)
-    print(json.dumps(metadata.get("streams", [{}])[0], indent=2))
+    st.write(json.dumps(metadata.get("streams", [{}])[0], indent=2))
 
-    print("\n🎞 Frame Extraction:")
+    st.write("\n🎞 Frame Extraction:")
     frame_count = extract_frames(video_path)
-    print(f"✅ Extracted {frame_count} frames at {FRAME_EXTRACTION_INTERVAL}s intervals")
+    st.write(f"✅ Extracted {frame_count} frames at {FRAME_EXTRACTION_INTERVAL}s intervals")
 
-    print("\n🔍 Running object detection...")
+    st.write("\n🔍 Running object detection...")
     visual_results = detect_objects("frames")
 
-    print("\n🌀 Calculating optical flow...")
+    st.write("\n🌀 Calculating optical flow...")
     flow_results = calculate_optical_flow("frames")
 
-    print("\n📊 Generating Final Report...")
+    st.write("\n📊 Generating Final Report...")
    report_data = generate_report(visual_results, flow_results)
 
-    print("\n🔍 Authenticity Analysis:")
+    st.write("\n🔍 Authenticity Analysis:")
     score = detect_manipulation() # This function should return a score
 
-    print(f"\n🎯 Final Score: {score}") # Debugging line
+    st.write(f"\n🎯 Final Score: {score}") # Debugging line
     return score # ✅ Ensure this score is returned properly
 
-# -------------------- Execution -------------------- #
-# Streamlit Interface
-
-# Side Navigation
-st.sidebar.title("Navigation")
-page = st.sidebar.radio("", ["Home", "Analyze Video", "About"])
-
-# Home Page
-if page == "Home":
-    st.markdown("<h1 class='title'>Video Manipulation Detection</h1>", unsafe_allow_html=True)
-
-    # Hero Section
-    col1, col2 = st.columns(2)
-    with col1:
-        st.markdown("""
-        <div class='hero-text'>
-            Detect manipulated videos with AI-powered analysis.
-            Protect yourself from deepfakes and synthetic media.
-        </div>
-        """, unsafe_allow_html=True)
-
-    with col2:
-        st.video("Realistic Universe Intro_free.mp4") # Add sample video URL
-
-    # Features Section
-    st.markdown("## How It Works")
-    cols = st.columns(3)
-    with cols[0]:
-        st.image("upload-icon.png", width=100)
-        st.markdown("### Upload Video")
-    with cols[1]:
-        st.image("analyze-icon.png", width=100)
-        st.markdown("### AI Analysis")
-    with cols[2]:
-        st.image("result-icon.png", width=100)
-        st.markdown("### Get Results")
-
-
-elif page == "Analyze Video":
-    uploaded_file = st.file_uploader("Upload a Video", type=["mp4", "mov"])
-
-    if uploaded_file is not None:
-        # Save uploaded file to a temporary location
-        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file:
-            temp_file.write(uploaded_file.read())
-            temp_video_path = temp_file.name # ✅ Correct variable name
-
-        st.video(temp_video_path)
-
-        if st.button("Analyze Video", label_visibility="collapsed"):
-            with st.spinner("Analyzing..."):
-                try:
-                    score = analyze_video(temp_video_path) # ✅ Ensure function exists
-
-                    st.write(f"Analysis Score: {score}")
-                    float(score)
-
-                    # Display result based on score
-                    if score >= 3.5 :
-                        st.markdown(f"""
-                        <div class='result-box suspicious'>
-                            <p>This video shows major signs of manipulation</p>
-                        </div>
-                        """, unsafe_allow_html=True)
-                    elif score >= 2.0:
-                        st.markdown(f"""
-                        <div class='result-box suspicious'>
-                            <p>This video shows minor signs of manipulation</p>
-                        </div>
-                        """, unsafe_allow_html=True)
-                    else:
-                        st.markdown(f"""
-                        <div class='result-box clean'>
-                            <p>No significant manipulation detected</p>
-                        </div>
-                        """, unsafe_allow_html=True)
-                except Exception as e:
-                    st.error(f"An error occurred during analysis: {e}")
-
-elif page == "About":
-    st.markdown("<h1 class='title'>About Us</h1>", unsafe_allow_html=True)
-
-    # Creator Profile
-    col1, col2 = st.columns(2)
-    with col1:
-        st.image("creator.jpg", width=300, caption="Ayush Agarwal, Lead Developer")
-    with col2:
-        st.markdown("""
-        <div class='about-text'>
-        ## Ayush Agarwal ,
-        Student at VIT Bhopal University ,
-        AIML enthusiast
-        <br><br>
-        📧 [email protected]
-        <br>
-        🔗 [LinkedIn](www.linkedin.com/in/ayush20039939)
-        <br>
-        🐙 [GitHub](https://github.com/Ayush99392003/Video_MAnipulation_Detection)
-        </div>
-        """, unsafe_allow_html=True)
-
-    # Technology Stack
-    st.markdown("## Our Technology")
-    st.markdown("""
-    <div class='tech-stack'>
-        <img src='https://img.icons8.com/color/96/000000/python.png'/>
-        <img src='https://img.icons8.com/color/96/000000/tensorflow.png'/>
-        <img src='https://img.icons8.com/color/96/000000/opencv.png'/>
-        <img src='https://raw.githubusercontent.com/github/explore/968d1eb8fb6b704c6be917f0000283face4f33ee/topics/streamlit/streamlit.png'/>
-    </div>
-    """, unsafe_allow_html=True)
+# -------------------- Streamlit App -------------------- #
+st.title("Video Manipulation Detection")
+
+uploaded_file = st.file_uploader("Upload a Video", type=["mp4", "mov"])
+
+if uploaded_file is not None:
+    # Save uploaded file to a temporary location
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file:
+        temp_file.write(uploaded_file.read())
+        temp_video_path = temp_file.name # ✅ Correct variable name
+
+    st.video(temp_video_path)
+
+    if st.button("Analyze Video"):
+        with st.spinner("Analyzing..."):
+            try:
+                score = analyze_video(temp_video_path) # ✅ Ensure function exists
+
+                # Debugging Line
+                st.write(f"Analysis Score: {score}")
+                float(score)
+                # Display result based on score
+                if score >= 3.5 :
+                    st.markdown(f"""
+                    <div class='result-box suspicious'>
+                        <p>This video shows major signs of manipulation</p>
+                    </div>
+                    """, unsafe_allow_html=True)
+                elif score >= 2.0:
+                    st.markdown(f"""
+                    <div class='result-box suspicious'>
+                        <p>This video shows minor signs of manipulation</p>
+                    </div>
+                    """, unsafe_allow_html=True)
+                else:
+                    st.markdown(f"""
+                    <div class='result-box clean'>
+                        <p>No significant manipulation detected</p>
+                    </div>
+                    """, unsafe_allow_html=True)
+            except Exception as e:
+                st.error(f"An error occurred during analysis: {e}")
 