ayush200399391001 committed on
Commit
d149ec1
Β·
verified Β·
1 Parent(s): 5883ac3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +146 -62
app.py CHANGED
@@ -11,7 +11,7 @@ from transformers import (
11
  AutoImageProcessor,
12
  AutoModelForObjectDetection
13
  )
14
- import streamlit as st
15
  import tempfile
16
 
17
  # -------------------- Configuration -------------------- #
@@ -20,13 +20,13 @@ FRAME_EXTRACTION_INTERVAL = 0.01 # Seconds between frame captures
20
 
21
  # -------------------- Model Loading -------------------- #
22
  try:
23
- st.write("πŸ”„ Loading visual model and processor...")
24
  processor_visual = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50")
25
  model_visual = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50").to(DEVICE)
26
- st.write(f"βœ… Model loaded on {DEVICE} successfully!")
27
  except Exception as e:
28
- st.error(f"❌ Error loading model: {e}")
29
- st.stop()
30
 
31
  # -------------------- Metadata Extraction -------------------- #
32
  def extract_metadata(video_path):
@@ -37,7 +37,7 @@ def extract_metadata(video_path):
37
  result = subprocess.run(cmd, capture_output=True, text=True)
38
  return json.loads(result.stdout)
39
  except Exception as e:
40
- st.error(f"❌ Metadata extraction failed: {e}")
41
  return {}
42
 
43
  # -------------------- Frame Extraction -------------------- #
@@ -47,7 +47,7 @@ def extract_frames(video_path, output_folder="frames"):
47
 
48
  cap = cv2.VideoCapture(video_path)
49
  if not cap.isOpened():
50
- st.error("❌ Could not open video file")
51
  return 0
52
 
53
  fps = cap.get(cv2.CAP_PROP_FPS)
@@ -70,7 +70,6 @@ def extract_frames(video_path, output_folder="frames"):
70
 
71
  cap.release()
72
  return frame_count
73
-
74
  # -------------------- Optical Flow Calculation -------------------- #
75
  def calculate_optical_flow(frames_folder):
76
  """Calculates dense optical flow between consecutive frames with validation"""
@@ -86,7 +85,7 @@ def calculate_optical_flow(frames_folder):
86
  break
87
 
88
  if ref_height is None:
89
- st.warning("⚠ No valid frames found for optical flow calculation")
90
  return []
91
 
92
  prev_gray = None
@@ -166,7 +165,7 @@ def detect_objects(frames_folder):
166
  })
167
 
168
  except Exception as e:
169
- st.warning(f"⚠ Error processing {frame_file}: {e}")
170
  results.append({
171
  "frame": frame_file,
172
  "detections": 0,
@@ -212,6 +211,7 @@ def detect_manipulation(report_path="report.json"):
212
  except Exception as e:
213
  return f"❌ Error in analysis: {str(e)}"
214
 
 
215
  # -------------------- Reporting -------------------- #
216
  def generate_report(visual_results, flow_results, output_file="report.json"):
217
  """Generates comprehensive analysis report"""
@@ -231,73 +231,157 @@ def generate_report(visual_results, flow_results, output_file="report.json"):
231
  with open(output_file, "w") as f:
232
  json.dump(report_data, f, indent=2)
233
 
234
- return report_data
 
 
235
 
236
  # -------------------- Main Pipeline -------------------- #
237
  def analyze_video(video_path):
238
  """Complete video analysis workflow"""
239
- st.write("\nπŸ“‹ Metadata Extraction:")
240
  metadata = extract_metadata(video_path)
241
- st.write(json.dumps(metadata.get("streams", [{}])[0], indent=2))
242
 
243
- st.write("\n🎞 Frame Extraction:")
244
  frame_count = extract_frames(video_path)
245
- st.write(f"βœ… Extracted {frame_count} frames at {FRAME_EXTRACTION_INTERVAL}s intervals")
246
 
247
- st.write("\nπŸ” Running object detection...")
248
  visual_results = detect_objects("frames")
249
 
250
- st.write("\nπŸŒ€ Calculating optical flow...")
251
  flow_results = calculate_optical_flow("frames")
252
 
253
- st.write("\nπŸ“Š Generating Final Report...")
254
  report_data = generate_report(visual_results, flow_results)
255
 
256
- st.write("\nπŸ” Authenticity Analysis:")
257
  score = detect_manipulation() # This function should return a score
258
 
259
- st.write(f"\n🎯 Final Score: {score}") # Debugging line
260
  return score # βœ… Ensure this score is returned properly
261
 
262
- # -------------------- Streamlit App -------------------- #
263
- st.title("Video Manipulation Detection")
264
-
265
- uploaded_file = st.file_uploader("Upload a Video", type=["mp4", "mov"])
266
-
267
- if uploaded_file is not None:
268
- # Save uploaded file to a temporary location
269
- with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file:
270
- temp_file.write(uploaded_file.read())
271
- temp_video_path = temp_file.name # βœ… Correct variable name
272
-
273
- st.video(temp_video_path)
274
-
275
- if st.button("Analyze Video"):
276
- with st.spinner("Analyzing..."):
277
- try:
278
- score = analyze_video(temp_video_path) # βœ… Ensure function exists
279
-
280
- # Debugging Line
281
- st.write(f"Analysis Score: {score}")
282
- float(score)
283
- # Display result based on score
284
- if score >= 3.5 :
285
- st.markdown(f"""
286
- <div class='result-box suspicious'>
287
- <p>This video shows major signs of manipulation</p>
288
- </div>
289
- """, unsafe_allow_html=True)
290
- elif score >= 2.0:
291
- st.markdown(f"""
292
- <div class='result-box suspicious'>
293
- <p>This video shows minor signs of manipulation</p>
294
- </div>
295
- """, unsafe_allow_html=True)
296
- else:
297
- st.markdown(f"""
298
- <div class='result-box clean'>
299
- <p>No significant manipulation detected</p>
300
- </div>
301
- """, unsafe_allow_html=True)
302
- except Exception as e:
303
- st.error(f"An error occurred during analysis: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  AutoImageProcessor,
12
  AutoModelForObjectDetection
13
  )
14
+ import os
15
  import tempfile
16
 
17
  # -------------------- Configuration -------------------- #
 
20
 
21
# -------------------- Model Loading -------------------- #
try:
    # Download (or load from the local cache) the DETR object-detection model
    # and its matching image processor, then move the model to DEVICE.
    print("πŸ”„ Loading visual model and processor...")
    processor_visual = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50")
    model_visual = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50").to(DEVICE)
    print(f"βœ… Model loaded on {DEVICE} successfully!")
except Exception as e:
    # Without the model the rest of the pipeline cannot run; abort with a
    # non-zero status. raise SystemExit(1) is preferred over the builtin
    # exit(), which is intended for interactive sessions only.
    print(f"❌ Error loading model: {e}")
    raise SystemExit(1)
30
 
31
  # -------------------- Metadata Extraction -------------------- #
32
  def extract_metadata(video_path):
 
37
  result = subprocess.run(cmd, capture_output=True, text=True)
38
  return json.loads(result.stdout)
39
  except Exception as e:
40
+ print(f"❌ Metadata extraction failed: {e}")
41
  return {}
42
 
43
  # -------------------- Frame Extraction -------------------- #
 
47
 
48
  cap = cv2.VideoCapture(video_path)
49
  if not cap.isOpened():
50
+ print("❌ Could not open video file")
51
  return 0
52
 
53
  fps = cap.get(cv2.CAP_PROP_FPS)
 
70
 
71
  cap.release()
72
  return frame_count
 
73
  # -------------------- Optical Flow Calculation -------------------- #
74
  def calculate_optical_flow(frames_folder):
75
  """Calculates dense optical flow between consecutive frames with validation"""
 
85
  break
86
 
87
  if ref_height is None:
88
+ print("⚠ No valid frames found for optical flow calculation")
89
  return []
90
 
91
  prev_gray = None
 
165
  })
166
 
167
  except Exception as e:
168
+ print(f"⚠ Error processing {frame_file}: {e}")
169
  results.append({
170
  "frame": frame_file,
171
  "detections": 0,
 
211
  except Exception as e:
212
  return f"❌ Error in analysis: {str(e)}"
213
 
214
+ # -------------------- Reporting -------------------- #
215
  # -------------------- Reporting -------------------- #
216
  def generate_report(visual_results, flow_results, output_file="report.json"):
217
  """Generates comprehensive analysis report"""
 
231
  with open(output_file, "w") as f:
232
  json.dump(report_data, f, indent=2)
233
 
234
+ # ... rest of visualization code ...
235
+
236
+ return report_data # Added return statement
237
 
238
# -------------------- Main Pipeline -------------------- #
def analyze_video(video_path):
    """Complete video analysis workflow.

    Runs every pipeline stage in order — metadata extraction, frame
    extraction, object detection, optical flow, report generation — and
    finishes with the authenticity analysis.

    Args:
        video_path: Path to the video file to analyse.

    Returns:
        The manipulation score produced by ``detect_manipulation()``
        (or an error string if that analysis failed).
    """
    print("\nπŸ“‹ Metadata Extraction:")
    metadata = extract_metadata(video_path)
    # Show only the first stream's metadata; [{}] is the fallback when
    # ffprobe returned nothing.
    print(json.dumps(metadata.get("streams", [{}])[0], indent=2))

    print("\n🎞 Frame Extraction:")
    frame_count = extract_frames(video_path)
    print(f"βœ… Extracted {frame_count} frames at {FRAME_EXTRACTION_INTERVAL}s intervals")

    print("\nπŸ” Running object detection...")
    visual_results = detect_objects("frames")

    print("\nπŸŒ€ Calculating optical flow...")
    flow_results = calculate_optical_flow("frames")

    print("\nπŸ“Š Generating Final Report...")
    # The report is written to report.json, which detect_manipulation()
    # reads; the returned dict was previously bound but never used.
    generate_report(visual_results, flow_results)

    print("\nπŸ” Authenticity Analysis:")
    score = detect_manipulation()

    print(f"\n🎯 Final Score: {score}")  # Debugging line
    return score
263
 
264
+
265
+
266
+ # -------------------- Execution -------------------- #
267
+
268
+
269
+
270
+ #--------------------------------Streamlit---------------------------------------------#
271
+ #--------------------------------Streamlit---------------------------------------------#
272
+ import streamlit as st
273
+ import tempfile
274
def local_css(file_name):
    """Inject a local CSS file into the Streamlit page.

    Args:
        file_name: Path to a ``.css`` file whose contents are wrapped in a
            ``<style>`` tag and rendered via ``st.markdown``.
    """
    # Read explicitly as UTF-8 so the stylesheet loads the same way
    # regardless of the platform's default encoding.
    with open(file_name, encoding="utf-8") as f:
        st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
277
+ local_css("style.css") # Ensure you have a separate style.css file
278
+
279
+ # Sidebar for Navigation
280
+ # Navigation
281
+ st.sidebar.title("Navigation")
282
+ page = st.sidebar.radio("", ["Home", "Analyze Video", "About"])
283
+
284
+ # Home Page
285
+ if page == "Home":
286
+ st.markdown("<h1 class='title'>Video Manipulation Detection</h1>", unsafe_allow_html=True)
287
+
288
+ # Hero Section
289
+ col1, col2 = st.columns(2)
290
+ with col1:
291
+ st.markdown("""
292
+ <div class='hero-text'>
293
+ Detect manipulated videos with AI-powered analysis.
294
+ Protect yourself from deepfakes and synthetic media.
295
+ </div>
296
+ """, unsafe_allow_html=True)
297
+
298
+ with col2:
299
+ st.video("Realistic Universe Intro_free.mp4") # Add sample video URL
300
+
301
+ # Features Section
302
+ st.markdown("## How It Works")
303
+ cols = st.columns(3)
304
+ with cols[0]:
305
+ st.image("upload-icon.png", width=100)
306
+ st.markdown("### Upload Video")
307
+ with cols[1]:
308
+ st.image("analyze-icon.png", width=100)
309
+ st.markdown("### AI Analysis")
310
+ with cols[2]:
311
+ st.image("result-icon.png", width=100)
312
+ st.markdown("### Get Results")
313
+
314
+
315
+ elif page == "Analyze Video":
316
+ uploaded_file = st.file_uploader("Upload a Video", type=["mp4", "mov"])
317
+
318
+ if uploaded_file is not None:
319
+ # Save uploaded file to a temporary location
320
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_file:
321
+ temp_file.write(uploaded_file.read())
322
+ temp_video_path = temp_file.name # βœ… Correct variable name
323
+
324
+ st.video(temp_video_path)
325
+
326
+ if st.button("Analyze Video"):
327
+ with st.spinner("Analyzing..."):
328
+ try:
329
+ score = analyze_video(temp_video_path) # βœ… Ensure function exists
330
+
331
+ # Debugging Line
332
+ st.write(f"Analysis Score: {score}")
333
+ float(score)
334
+ # Display result based on score
335
+ if score >= 3.5 :
336
+ st.markdown(f"""
337
+ <div class='result-box suspicious'>
338
+ <p>This video shows major signs of manipulation</p>
339
+ </div>
340
+ """, unsafe_allow_html=True)
341
+ elif score >= 2.0:
342
+ st.markdown(f"""
343
+ <div class='result-box suspicious'>
344
+ <p>This video shows minor signs of manipulation</p>
345
+ </div>
346
+ """, unsafe_allow_html=True)
347
+ else:
348
+ st.markdown(f"""
349
+ <div class='result-box clean'>
350
+ <p>No significant manipulation detected</p>
351
+ </div>
352
+ """, unsafe_allow_html=True)
353
+ except Exception as e:
354
+ st.error(f"An error occurred during analysis: {e}")
355
+
356
+ elif page == "About": # βœ… Now this will work correctly
357
+ st.markdown("<h1 class='title'>About Us</h1>", unsafe_allow_html=True)
358
+
359
+ # Creator Profile
360
+ col1, col2 = st.columns(2)
361
+ with col1:
362
+ st.image("creator.jpg", width=300, caption="Ayush Agarwal, Lead Developer")
363
+ with col2:
364
+ st.markdown("""
365
+ <div class='about-text'>
366
+ ## Ayush Agarwal ,
367
+ Student at VIT Bhopal University ,
368
+ AIML enthusiast
369
+ <br><br>
370
+ πŸ“§ [email protected]
371
+ <br>
372
+ πŸ”— [LinkedIn](www.linkedin.com/in/ayush20039939)
373
+ <br>
374
+ πŸ™ [GitHub](https://github.com)
375
+ </div>
376
+ """, unsafe_allow_html=True)
377
+
378
+ # Technology Stack
379
+ st.markdown("## Our Technology")
380
+ st.markdown("""
381
+ <div class='tech-stack'>
382
+ <img src='https://img.icons8.com/color/96/000000/python.png'/>
383
+ <img src='https://img.icons8.com/color/96/000000/tensorflow.png'/>
384
+ <img src='https://img.icons8.com/color/96/000000/opencv.png'/>
385
+ <img src='https://raw.githubusercontent.com/github/explore/968d1eb8fb6b704c6be917f0000283face4f33ee/topics/streamlit/streamlit.png'/>
386
+ </div>
387
+ """, unsafe_allow_html=True)