Abdo-Alshoki committed · verified
Commit d620482
1 Parent(s): aaadccf

Create app.py with the initial setup and model loading

Files changed (1): app.py (+104, -0)
app.py ADDED
@@ -0,0 +1,104 @@
import torch
import gradio as gr
import torch.nn as nn
import torchvision
import cv2
import numpy as np
import tempfile


class MyModel(nn.Module):
    def __init__(self, num_classes=1):
        super(MyModel, self).__init__()  # Initialize nn.Module

        # R3D-18 backbone pretrained on Kinetics-400
        self.model = torchvision.models.video.r3d_18(pretrained=True)

        # Replace the classifier head with a single-logit output for binary classification
        self.model.fc = nn.Linear(self.model.fc.in_features, num_classes)
    def preprocess_video(self, video_path, num_frames=40):
        """Preprocess a video: sample frames evenly, resize, and return a tensor."""
        cap = cv2.VideoCapture(video_path)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_indices = np.linspace(0, total_frames - 1, num=num_frames, dtype=int)
        sampled_frames = []

        for idx in frame_indices:
            cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
            ret, frame = cap.read()
            if not ret:
                continue
            frame = cv2.resize(frame, (112, 112))  # Resize to 112x112 for R3D-18
            frame = np.transpose(frame, (2, 0, 1))  # Channels-first: (3, 112, 112)
            sampled_frames.append(frame)

        cap.release()

        # Stack the frames and zero-pad if the video was too short or frames failed to decode
        frames = np.stack(sampled_frames) if sampled_frames else np.zeros((0, 3, 112, 112))
        if len(frames) < num_frames:
            padding = np.zeros((num_frames - len(frames), 3, 112, 112))
            frames = np.concatenate([frames, padding], axis=0)

        # Convert to tensor and rearrange dimensions to (3, num_frames, 112, 112)
        return torch.tensor(frames).float().permute(1, 0, 2, 3).unsqueeze(0)
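
    # Shape note: np.stack yields (num_frames, 3, 112, 112); permute(1, 0, 2, 3)
    # reorders it to (3, num_frames, 112, 112); unsqueeze(0) adds the batch axis,
    # giving the (N, C, T, H, W) input layout that r3d_18 expects.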

    def forward(self, x):
        return self.model(x)

    def test(self, video_paths):
        """Run the model on the given videos and return a probability per video."""
        self.model.eval()

        predictions = []

        with torch.no_grad():
            for video_path in video_paths:
                X = self.preprocess_video(video_path)

                output = self.model(X)
                pred = torch.sigmoid(output)  # Apply sigmoid for binary classification

                # Track predictions
                predictions.append(pred.item())

        return predictions

    def save_model(self, filepath):
        torch.save({
            'model_state_dict': self.state_dict(),
        }, filepath)

    @staticmethod
    def load_model(filepath, num_classes=1):
        model = MyModel(num_classes)
        checkpoint = torch.load(filepath, weights_only=True)
        model.load_state_dict(checkpoint['model_state_dict'])

        model.eval()
        return model
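
# Note: the checkpoint loaded below is assumed to be produced elsewhere by a
# training run ending with a call like trained_model.save_model('pre_3D_model.h5').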

# load_model is a @staticmethod, so call it on the class rather than an instance
model = MyModel.load_model('pre_3D_model.h5')

def classify_video(video):
    # gr.Video passes the uploaded file's path; test() expects a list of paths
    prob = model.test([video])[0]

    label = "Non-violent" if prob >= 0.5 else "Violent"

    return label, prob


# Set up the Gradio interface
interface = gr.Interface(
    fn=classify_video,
    inputs=gr.Video(),  # Allows video upload
    outputs=[gr.Text(), gr.Number()],  # Outputs classification and probability
    title="Violence Detection in Videos",
    description="Upload a video to classify it as violent or non-violent with a probability score."
)

interface.launch(share=True, debug=True)
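
For a quick sanity check of the inference path without launching the web UI, one might run a few lines like the following in the same module (a minimal sketch: `sample.mp4` is a hypothetical clip path, and the `pre_3D_model.h5` checkpoint must already exist):

    # Hypothetical smoke test (run before interface.launch(), which blocks)
    model = MyModel.load_model('pre_3D_model.h5')
    prob = model.test(['sample.mp4'])[0]  # list of paths in, one probability per video out
    print('Non-violent' if prob >= 0.5 else 'Violent', round(prob, 3))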