fix
app.py CHANGED
@@ -81,7 +81,7 @@ class VideoHighlightDetector:
         ).to(self.device)
 
         outputs = self.model.generate(**inputs, max_new_tokens=512, do_sample=True, temperature=0.7)
-        return self.processor.decode(outputs[0], skip_special_tokens=True).lower().split("
+        return self.processor.decode(outputs[0], skip_special_tokens=True).lower().split("assistant: ")[1]
 
     def determine_highlights(self, video_description: str) -> str:
         """Determine what constitutes highlights based on video description."""
@@ -109,7 +109,7 @@ class VideoHighlightDetector:
         ).to(self.device)
 
         outputs = self.model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.7)
-        return self.processor.decode(outputs[0], skip_special_tokens=True).
+        return self.processor.decode(outputs[0], skip_special_tokens=True).split("Assistant: ")[1]
 
     def process_segment(self, video_path: str, highlight_types: str) -> bool:
         """Process a video segment and determine if it contains highlights."""
@@ -137,7 +137,7 @@ class VideoHighlightDetector:
         ).to(self.device)
 
         outputs = self.model.generate(**inputs, max_new_tokens=64, do_sample=False)
-        response = self.processor.decode(outputs[0], skip_special_tokens=True).lower().split("
+        response = self.processor.decode(outputs[0], skip_special_tokens=True).lower().split("assistant: ")[1]
 
         return "yes" in response
 
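All three hunks apply the same fix: processor.decode returns the full chat transcript, prompt included, so the reply is recovered by splitting on the assistant role marker and keeping the text after it. A minimal defensive variant of that pattern is sketched below; extract_reply is a hypothetical helper written for this note, not part of the commit, and the "assistant: " default simply mirrors the marker used in the diff. Unlike split(marker)[1], it does not raise an IndexError when the marker is absent.

def extract_reply(decoded: str, marker: str = "assistant: ") -> str:
    # Hypothetical helper, not part of this commit: locate the role marker
    # case-insensitively and return everything after it, preserving the
    # reply's original casing. Falls back to the whole string when the
    # marker is missing, where split(marker)[1] would raise an IndexError.
    idx = decoded.lower().find(marker.lower())
    return decoded[idx + len(marker):] if idx != -1 else decoded

# Usage, mirroring process_segment above (assumed context):
# decoded = self.processor.decode(outputs[0], skip_special_tokens=True)
# return "yes" in extract_reply(decoded).lower()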