"""

Phi-4 model implementation for video anomaly detection using Hugging Face transformers.

"""

import cv2
import numpy as np
import base64
import os
import tempfile
from PIL import Image
import io
import re
import torch
import time
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoProcessor
from huggingface_hub import snapshot_download

class Phi4AnomalyDetector:
    def __init__(self, model_name="microsoft/Phi-4-multimodal-instruct"):
        """

        Initialize the Phi4AnomalyDetector with the Phi-4 vision model.

        

        Args:

            model_name (str): Name of the Phi-4 vision model on Hugging Face

        """
        self.model_name = model_name
        self.model_dir = os.path.join(os.getcwd(), "phi4_model")
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        
        # Load or download the model
        self.load_model()
        
    def load_model(self):
        """

        Load the Phi-4 model from local directory or download from Hugging Face.

        """
        try:
            if not os.path.exists(self.model_dir):
                print(f"Downloading {self.model_name} model to {self.model_dir}...")
                snapshot_download(repo_id=self.model_name, local_dir=self.model_dir)
                print("Model downloaded successfully.")
            else:
                print(f"Using existing model from {self.model_dir}")
            
            # Load model components with trust_remote_code=True
            self.processor = AutoProcessor.from_pretrained(
                self.model_dir,
                trust_remote_code=True
            )
            
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_dir,
                trust_remote_code=True
            )
            
            # Load model with appropriate dtype based on device
            if self.device == "cuda":
                self.model = AutoModelForCausalLM.from_pretrained(
                    self.model_dir,
                    torch_dtype=torch.float16,
                    device_map="auto",
                    trust_remote_code=True
                )
            else:
                self.model = AutoModelForCausalLM.from_pretrained(
                    self.model_dir,
                    device_map="auto",
                    trust_remote_code=True
                )
            
            print(f"Phi-4 model loaded successfully on {self.device}")
            
        except Exception as e:
            raise RuntimeError(f"Failed to load Phi-4 model: {str(e)}")
    
    def extract_frames(self, video_path, skip_frames):
        """

        Extract frames from a video file, skipping the specified number of frames.

        

        Args:

            video_path (str): Path to the video file

            skip_frames (int): Number of frames to skip between captures

            

        Returns:

            list: List of extracted frames as numpy arrays

        """
        frames = []
        # Use the default backend for video files
        # DirectShow can cause issues with some video files
        cap = cv2.VideoCapture(video_path)
        
        # Don't set MJPG format for video files as it can interfere with proper decoding
        # cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M','J','P','G'))
        
        if not cap.isOpened():
            raise ValueError(f"Could not open video file: {video_path}")
        
        frame_count = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break
                
            if frame_count % (skip_frames + 1) == 0:
                # Convert from BGR to RGB
                rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frames.append(rgb_frame)
                
            frame_count += 1
            
        cap.release()
        return frames
    
    def process_live_stream(self, stream_source, skip_frames, prompt, analysis_depth="granular", max_frames=None, callback=None, time_interval=None):
        """

        Process frames from a live video stream.

        

        Args:

            stream_source: Stream source (0 for webcam, URL for IP camera or RTSP stream)

            skip_frames (int): Number of frames to skip between captures

            prompt (str): Prompt describing what anomaly to look for

            analysis_depth (str): "granular" for frame-by-frame analysis or "cumulative" for overall analysis

            max_frames (int, optional): Maximum number of frames to process (None for unlimited)

            callback (function, optional): Callback function to report progress

            time_interval (int, optional): If set, capture one frame every X seconds instead of using skip_frames

            

        Returns:

            list or dict: List of analysis results for each processed frame (granular) or dict with cumulative analysis (cumulative)

        """
        # Open the video stream with appropriate backend
        # Only use DirectShow for local webcams (0 or 1) on Windows
        if os.name == 'nt' and (stream_source == 0 or stream_source == 1):
            # This is a local webcam on Windows, use DirectShow
            cap = cv2.VideoCapture(stream_source, cv2.CAP_DSHOW)
            
            # For webcams, MJPG format can be more stable
            cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M','J','P','G'))
        else:
            # For IP cameras, RTSP streams, or non-Windows systems, use default backend
            cap = cv2.VideoCapture(stream_source)
        
        if not cap.isOpened():
            raise ValueError(f"Could not open video stream: {stream_source}")
        
        frames = []
        frame_count = 0
        processed_count = 0
        last_capture_time = time.time()
        
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                
                current_time = time.time()
                
                # Determine if we should capture this frame
                should_capture = False
                
                if time_interval is not None:
                    # Time-based interval mode
                    if current_time - last_capture_time >= time_interval:
                        should_capture = True
                        last_capture_time = current_time
                else:
                    # Frame-skip mode
                    if frame_count % (skip_frames + 1) == 0:
                        should_capture = True
                
                if should_capture:
                    # Convert from BGR to RGB
                    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    frames.append(rgb_frame)
                    processed_count += 1
                    
                    # Process the frame immediately in time interval mode
                    if time_interval is not None:
                        # Process the frame immediately
                        result = self.analyze_frame(rgb_frame, prompt)
                        
                        # Make sure the frame is included in the result
                        if "frame" not in result:
                            result["frame"] = rgb_frame
                        
                        # If we have a callback, call it with -1 for total to indicate continuous mode
                        if callback:
                            callback(processed_count, -1)
                        
                        # In time interval mode, we yield results one by one
                        if analysis_depth == "granular":
                            yield result
                        else:
                            # For cumulative analysis, we need to keep all frames
                            # We'll handle this outside the loop
                            pass
                    else:
                        # Update progress if callback is provided (frame count mode)
                        if callback and max_frames:
                            callback(processed_count, max_frames)
                        
                        # Break if we've reached the maximum number of frames
                        if max_frames and processed_count >= max_frames:
                            break
                
                frame_count += 1
        finally:
            cap.release()
        
        # If we're in time interval mode with cumulative analysis, we don't return here
        # as we're yielding results above
        if time_interval is not None and analysis_depth == "cumulative":
            # This is a special case - we need to periodically do cumulative analysis
            # For simplicity, we'll just return the current cumulative analysis
            result = self.analyze_frames_cumulatively(frames, prompt, callback)
            yield result
            return
            
        # Process the collected frames for non-time-interval mode.
        # Because this method contains `yield`, it is a generator: results must be
        # yielded here as well, since a plain `return value` in a generator is never
        # delivered to the caller.
        if time_interval is None:
            if analysis_depth == "cumulative":
                yield self.analyze_frames_cumulatively(frames, prompt, callback)
            else:  # granular (default)
                for i, frame in enumerate(frames):
                    if callback:
                        callback(i, len(frames))

                    yield self.analyze_frame(frame, prompt)
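
    # Usage sketch (hypothetical, not part of the original module): because
    # process_live_stream is a generator, callers consume its results by
    # iterating over it, for example:
    #
    #     detector = Phi4AnomalyDetector()
    #     for result in detector.process_live_stream(
    #             0, skip_frames=30, prompt="Report anything unusual.", max_frames=10):
    #         print(result["anomaly_detected"], result["confidence"])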
    
    def analyze_frame(self, frame, prompt):
        """

        Analyze a frame using the Phi-4 vision model.

        

        Args:

            frame (numpy.ndarray): Frame to analyze

            prompt (str): Prompt describing what anomaly to look for

            

        Returns:

            dict: Analysis result from the model

        """
        # Convert numpy array to PIL Image
        pil_image = Image.fromarray(frame)
        
        # Enhanced prompt to get structured information about anomalies
        enhanced_prompt = f"""

{prompt}



After your analysis, please include a structured assessment at the end of your response in this exact format:

ANOMALY_DETECTED: [Yes/No]

ANOMALY_TYPE: [Human/Non-human/None]

CONFIDENCE: [0-100]



For ANOMALY_DETECTED, answer "Yes" if you detect any anomaly, otherwise "No".

For ANOMALY_TYPE, if an anomaly is detected, classify it as either "Human" (if it involves people or human activities) or "Non-human" (if it involves objects, animals, or environmental factors). If no anomaly is detected, use "None".

For CONFIDENCE, provide a number from 0 to 100 indicating your confidence level in the assessment.

"""
        
        try:
            # Process the image and prompt with the Phi-4 model
            inputs = self.processor(text=enhanced_prompt, images=pil_image, return_tensors="pt")
            
            # Move inputs to the same device as the model
            for key in inputs:
                if torch.is_tensor(inputs[key]):
                    inputs[key] = inputs[key].to(self.model.device)
            
            # Generate response
            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=500,
                    do_sample=False
                )
            
            # Decode the response
            response_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
            
            # Extract the part after the prompt
            if enhanced_prompt in response_text:
                response_text = response_text.split(enhanced_prompt)[-1].strip()
            
            # Extract anomaly detection information using regex
            anomaly_detected = False
            anomaly_type = "None"
            confidence = 0
            
            # Look for the structured format
            anomaly_match = re.search(r'ANOMALY_DETECTED:\s*(Yes|No)', response_text, re.IGNORECASE)
            if anomaly_match and anomaly_match.group(1).lower() == 'yes':
                anomaly_detected = True
                confidence = 90  # Default high confidence when anomaly is detected
                
                # If anomaly detected, look for the type
                type_match = re.search(r'ANOMALY_TYPE:\s*(Human|Non-human|None)', response_text, re.IGNORECASE)
                if type_match:
                    anomaly_type = type_match.group(1)
                    
                # Look for confidence information
                conf_match = re.search(r'CONFIDENCE:\s*(\d+)', response_text, re.IGNORECASE)
                if conf_match:
                    try:
                        confidence = int(conf_match.group(1))
                    except ValueError:
                        pass  # Keep default confidence if parsing fails
            
            return {
                "text": response_text,
                "analysis": response_text,  # Add analysis field as an alias for text
                "frame": frame,
                "anomaly_detected": anomaly_detected,
                "anomaly_type": anomaly_type,
                "confidence": confidence,  # Add confidence field
                "timestamp": time.time()  # Add timestamp for live stream analysis
            }
        except Exception as e:
            return {
                "error": str(e),
                "frame": frame,
                "anomaly_detected": False,
                "anomaly_type": "None",
                "confidence": 0,  # Add default confidence for error
                "timestamp": time.time()  # Add timestamp for live stream analysis
            }
    
    def analyze_frames_cumulatively(self, frames, prompt, callback=None):
        """

        Analyze all frames together and provide a cumulative analysis.

        

        Args:

            frames (list): List of frames to analyze

            prompt (str): Prompt describing what anomaly to look for

            callback (function, optional): Callback function to report progress

            

        Returns:

            dict: Cumulative analysis result

        """
        # First, analyze each frame individually to identify potential anomalies
        individual_results = []
        for i, frame in enumerate(frames):
            if callback:
                callback(i, len(frames) * 2)  # First half of progress for individual analysis
                
            result = self.analyze_frame(frame, f"{prompt} Provide a brief analysis of this frame only.")
            individual_results.append(result)
        
        # Identify frames with potential anomalies
        anomaly_frames = []
        anomaly_descriptions = []
        anomaly_types = []
        
        for i, result in enumerate(individual_results):
            if "error" not in result and result["anomaly_detected"]:
                anomaly_frames.append(result["frame"])
                anomaly_descriptions.append(f"Frame {i+1}: {result['text']}")
                anomaly_types.append(result["anomaly_type"])
                
                # Limit to 3 anomaly frames
                if len(anomaly_frames) >= 3:
                    break
        
        # If no anomalies were detected, use the first, middle, and last frames
        if not anomaly_frames and len(frames) > 0:
            if len(frames) == 1:
                anomaly_frames = [frames[0]]
            elif len(frames) == 2:
                anomaly_frames = [frames[0], frames[1]]
            else:
                anomaly_frames = [
                    frames[0],
                    frames[len(frames) // 2],
                    frames[-1]
                ]
        
        # Limit to max 3 frames
        anomaly_frames = anomaly_frames[:3]
        
        # Create a cumulative analysis prompt with the anomaly descriptions
        cumulative_prompt = f"""

{prompt}



Based on the analysis of all frames, provide a comprehensive summary of any anomalies detected in the video. Focus on patterns or recurring issues. Here are some notable observations from individual frames:



{chr(10).join(anomaly_descriptions[:5])}



After your analysis, please include a structured assessment at the end of your response in this exact format:

ANOMALY_DETECTED: [Yes/No]

ANOMALY_TYPE: [Human/Non-human/None]



For ANOMALY_DETECTED, answer "Yes" if you detect any anomaly across the video, otherwise "No".

For ANOMALY_TYPE, if an anomaly is detected, classify the predominant type as either "Human" (if it involves people or human activities) or "Non-human" (if it involves objects, animals, or environmental factors). If no anomaly is detected, use "None".

"""
        
        try:
            if callback:
                callback(len(frames), len(frames) * 2)  # Second half of progress for cumulative analysis
            
            # For cumulative analysis, we'll use the first anomaly frame (or first frame if no anomalies)
            representative_frame = anomaly_frames[0] if anomaly_frames else frames[0]
            pil_image = Image.fromarray(representative_frame)
            
            # Process with Phi-4
            inputs = self.processor(text=cumulative_prompt, images=pil_image, return_tensors="pt")
            
            # Move inputs to the same device as the model
            for key in inputs:
                if torch.is_tensor(inputs[key]):
                    inputs[key] = inputs[key].to(self.model.device)
            
            # Generate response
            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=1000,
                    do_sample=False
                )
            
            # Decode the response
            response_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
            
            # Extract the part after the prompt
            if cumulative_prompt in response_text:
                response_text = response_text.split(cumulative_prompt)[-1].strip()
            
            # Extract anomaly detection information using regex
            anomaly_detected = False
            anomaly_type = "None"
            confidence = 0
            
            # Look for the structured format
            anomaly_match = re.search(r'ANOMALY_DETECTED:\s*(Yes|No)', response_text, re.IGNORECASE)
            if anomaly_match and anomaly_match.group(1).lower() == 'yes':
                anomaly_detected = True
                confidence = 90  # Default high confidence when anomaly is detected
                
                # If anomaly detected, look for the type
                type_match = re.search(r'ANOMALY_TYPE:\s*(Human|Non-human|None)', response_text, re.IGNORECASE)
                if type_match:
                    anomaly_type = type_match.group(1)
                    
                # Look for confidence information
                conf_match = re.search(r'CONFIDENCE:\s*(\d+)', response_text, re.IGNORECASE)
                if conf_match:
                    try:
                        confidence = int(conf_match.group(1))
                    except ValueError:
                        pass  # Keep default confidence if parsing fails
            
            return {
                "text": response_text,
                "analysis": response_text,  # Add analysis field as an alias for text
                "frames": anomaly_frames,
                "anomaly_detected": anomaly_detected,
                "anomaly_type": anomaly_type,
                "confidence": confidence,  # Add confidence field
                "timestamp": time.time()  # Add timestamp for live stream analysis
            }
        except Exception as e:
            return {
                "error": str(e),
                "frames": anomaly_frames,
                "anomaly_detected": False,
                "anomaly_type": "None",
                "confidence": 0,  # Add default confidence for error
                "timestamp": time.time()  # Add timestamp for live stream analysis
            }
    
    def process_video(self, video_path, skip_frames, prompt, analysis_depth="granular", callback=None):
        """

        Process a video file, extracting frames and analyzing them for anomalies.

        

        Args:

            video_path (str): Path to the video file

            skip_frames (int): Number of frames to skip between captures

            prompt (str): Prompt describing what anomaly to look for

            analysis_depth (str): "granular" for frame-by-frame analysis or "cumulative" for overall analysis

            callback (function, optional): Callback function to report progress

            

        Returns:

            list or dict: List of analysis results for each processed frame (granular) or dict with cumulative analysis (cumulative)

        """
        frames = self.extract_frames(video_path, skip_frames)
        
        if analysis_depth == "cumulative":
            return self.analyze_frames_cumulatively(frames, prompt, callback)
        else:  # granular (default)
            results = []
            
            for i, frame in enumerate(frames):
                if callback:
                    callback(i, len(frames))
                    
                result = self.analyze_frame(frame, prompt)
                results.append(result)
                
            return results
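

# Hypothetical usage sketch (not part of the original module): demonstrates the
# granular, frame-by-frame video analysis path. The video filename and prompt
# below are illustrative assumptions, not values from the source.
if __name__ == "__main__":
    detector = Phi4AnomalyDetector()

    results = detector.process_video(
        video_path="sample.mp4",  # hypothetical input file
        skip_frames=30,
        prompt="Describe any unusual activity in this scene.",
        analysis_depth="granular",
        callback=lambda done, total: print(f"Processed frame {done + 1}/{total}"),
    )

    for r in results:
        if r.get("anomaly_detected"):
            print(f"Anomaly ({r['anomaly_type']}) detected with confidence {r['confidence']}")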