joy1515 committed
Commit 8d6e541 · verified · Parent: a2fb18c

Update app.py

Files changed (1):
  1. app.py +10 -6
app.py CHANGED
```diff
@@ -22,21 +22,25 @@ class ImageSearchSystem:
     def __init__(self):
         self.device = "cuda" if torch.cuda.is_available() else "cpu"
         logger.info(f"Using device: {self.device}")
-
+
         # Load CLIP model
         self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
         self.model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16").to(self.device)
 
-        # Prune the model (access vision module directly)
+        # Prune the model (access vision module correctly)
         parameters_to_prune = (
-            (self.model.vision.transformer.encoder.layers, 'attention.self.query.weight'),
-            (self.model.vision.transformer.encoder.layers, 'attention.self.key.weight'),
-            (self.model.vision.transformer.encoder.layers, 'attention.self.value.weight')
+            (self.model.vision_model.encoder.layers, 'attention.self.query.weight'),
+            (self.model.vision_model.encoder.layers, 'attention.self.key.weight'),
+            (self.model.vision_model.encoder.layers, 'attention.self.value.weight')
         )
 
         # Prune the weights
         prune.l1_unstructured(parameters_to_prune, amount=0.2)
-
+
+        # Initialize dataset
+        self.image_paths = []
+        self.index = None
+        self.initialized = False
 
         # Initialize dataset
         self.image_paths = []
```
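
Note on the patched lines: even after the rename to `vision_model`, the pruning call is unlikely to run as written. `torch.nn.utils.prune.l1_unstructured` takes a single module and the name of one of its direct parameters (`module, name, amount`), not a tuple of `(module, name)` pairs, and transformers' `CLIPEncoderLayer` exposes its attention projections as `self_attn.q_proj` / `k_proj` / `v_proj` (the `attention.self.query` path is BERT's naming, not CLIP's). A minimal sketch of what this commit appears to be aiming for, pruning 20% of the vision encoder's attention projection weights by L1 magnitude via `prune.global_unstructured`, which does accept a list of `(module, name)` pairs:

```python
import torch
from torch.nn.utils import prune
from transformers import CLIPModel

device = "cuda" if torch.cuda.is_available() else "cpu"
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16").to(device)

# Collect each vision encoder layer's attention projection weights.
# In transformers' CLIP these live at layer.self_attn.{q,k,v}_proj.
parameters_to_prune = [
    (proj, "weight")
    for layer in model.vision_model.encoder.layers
    for proj in (layer.self_attn.q_proj,
                 layer.self_attn.k_proj,
                 layer.self_attn.v_proj)
]

# Zero out the 20% smallest-magnitude weights, pooled across all listed tensors.
prune.global_unstructured(
    parameters_to_prune,
    pruning_method=prune.L1Unstructured,
    amount=0.2,
)

# Optional: bake the masks in, so `weight` is a plain tensor again.
for module, name in parameters_to_prune:
    prune.remove(module, name)
```

Until `prune.remove` is called, each pruned module keeps the original values as `weight_orig` plus a `weight_mask` buffer and recomputes `weight` on the fly, which matters if the model is later saved or scripted.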