Implemented pruning
app.py (CHANGED)
@@ -12,6 +12,7 @@ from tqdm import tqdm
 import speech_recognition as sr
 from gtts import gTTS
 import tempfile
+import torch.nn.utils.prune as prune
 
 # Configure logging
 logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
@@ -27,12 +28,10 @@ class ImageSearchSystem:
         self.model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16").to(self.device)
 
         # Prune the model
-
-
-            num_heads_to_prune=10,
-            num_layers_to_prune=2,
+        parameters_to_prune = (
+            (self.model.visual.encoder.layers, 'weight'),
         )
-
+        prune.l1_unstructured(parameters_to_prune, amount=0.2)
 
         # Initialize dataset
         self.image_paths = []
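
Note that the added code is unlikely to run as committed: torch.nn.utils.prune.l1_unstructured prunes a single (module, name) pair, not a tuple of pairs; the Hugging Face transformers CLIPModel exposes its image tower as vision_model (visual is the attribute name in the original OpenAI CLIP package); and encoder.layers is an nn.ModuleList, which owns no 'weight' parameter of its own. A minimal sketch of the intended 20% L1 pruning, assuming the goal is to prune all Linear layers of the vision encoder globally, could look like this:

import torch
import torch.nn.utils.prune as prune
from transformers import CLIPModel

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")

# Pruning must target leaf modules that own a "weight" parameter,
# so collect every Linear layer inside the vision encoder.
parameters_to_prune = [
    (module, "weight")
    for module in model.vision_model.encoder.modules()
    if isinstance(module, torch.nn.Linear)
]

# l1_unstructured handles one (module, name) pair; for a collection,
# the global variant takes the list plus a pruning-method class.
prune.global_unstructured(
    parameters_to_prune,
    pruning_method=prune.L1Unstructured,
    amount=0.2,  # zero out the 20% smallest-magnitude weights overall
)

# Optionally fold the masks into the weights so the model can be saved
# without the weight_orig / weight_mask re-parametrization.
for module, name in parameters_to_prune:
    prune.remove(module, name)

Also worth noting: unstructured pruning only zeroes entries in the existing weight tensors, so on its own it does not shrink the model or speed up inference without sparse-aware kernels or a subsequent compression step.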
|