cleanup
Browse files- app.py +1 -1
- requirements.txt +1 -1
app.py
CHANGED
@@ -52,7 +52,7 @@ class VideoHighlightDetector:
         self.model = AutoModelForVision2Seq.from_pretrained(
             model_path,
             torch_dtype=torch.bfloat16,
-            _attn_implementation="flash_attention_2"
+            # _attn_implementation="flash_attention_2"
         ).to(device)

     def analyze_video_content(self, video_path: str) -> str:
requirements.txt
CHANGED
@@ -4,7 +4,7 @@ accelerate
 huggingface_hub
 gradio
 transformers@git+https://github.com/huggingface/transformers.git@refs/pull/36126/head
-https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.9.post1/flash_attn-2.5.9.post1+cu118torch1.12cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
+# https://github.com/Dao-AILab/flash-attention/releases/download/v2.5.9.post1/flash_attn-2.5.9.post1+cu118torch1.12cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
 spaces
 num2words
 decord