sohamnk commited on
Commit
e678a10
·
verified · 1 Parent(s): a9999ab
1 Parent(s): a9999ab

Update pipeline/__init__.py

Browse files
Files changed (1) hide show
  1. pipeline/__init__.py +18 -18
pipeline/__init__.py CHANGED
@@ -1,22 +1,29 @@
1
  import os
2
  import torch
3
  from flask import Flask
4
- from sentence_transformers.cross_encoder import CrossEncoder
5
 
6
  FEATURE_WEIGHTS = {"shape": 0.4, "color": 0.5, "texture": 0.1}
7
  FINAL_SCORE_THRESHOLD = 0.5
8
 
9
- # create flask app
10
  app = Flask(__name__)
11
 
12
- # load models
13
- print("="*50)
14
  print("🚀 Initializing application and loading models...")
 
15
  device_name = os.environ.get("device", "cpu")
16
- device = torch.device('cuda' if 'cuda' in device_name and torch.cuda.is_available() else 'cpu')
 
 
17
  print(f"🧠 Using device: {device}")
18
 
19
- from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection, AutoTokenizer, AutoModel
 
 
 
 
 
20
  from segment_anything import SamPredictor, sam_model_registry
21
 
22
  print("...Loading Grounding DINO model...")
@@ -25,6 +32,7 @@ processor_gnd = AutoProcessor.from_pretrained(gnd_model_id)
25
  model_gnd = AutoModelForZeroShotObjectDetection.from_pretrained(gnd_model_id).to(device)
26
 
27
  print("...Loading Segment Anything (SAM) model...")
 
28
  sam_checkpoint = "sam_vit_b_01ec64.pth"
29
  sam_model = sam_model_registry["vit_b"](checkpoint=sam_checkpoint).to(device)
30
  predictor = SamPredictor(sam_model)
@@ -34,7 +42,7 @@ bge_model_id = "BAAI/bge-small-en-v1.5"
34
  tokenizer_text = AutoTokenizer.from_pretrained(bge_model_id)
35
  model_text = AutoModel.from_pretrained(bge_model_id).to(device)
36
 
37
-
38
  models = {
39
  "processor_gnd": processor_gnd,
40
  "model_gnd": model_gnd,
@@ -44,16 +52,8 @@ models = {
44
  "device": device
45
  }
46
 
47
-
48
- print("...Loading Cross-Encoder model for re-ranking...")
49
-
50
- cross_encoder_model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2', device=device)
51
-
52
-
53
- models["cross_encoder"] = cross_encoder_model
54
-
55
-
56
  print("✅ All models loaded successfully.")
57
- print("="*50)
58
 
59
- from pipeline import routes
 
 
1
  import os
2
  import torch
3
  from flask import Flask
 
4
 
5
  FEATURE_WEIGHTS = {"shape": 0.4, "color": 0.5, "texture": 0.1}
6
  FINAL_SCORE_THRESHOLD = 0.5
7
 
8
+ # Create Flask app
9
  app = Flask(__name__)
10
 
11
+ # Load models
12
+ print("=" * 50)
13
  print("🚀 Initializing application and loading models...")
14
+
15
  device_name = os.environ.get("device", "cpu")
16
+ device = torch.device(
17
+ 'cuda' if 'cuda' in device_name and torch.cuda.is_available() else 'cpu'
18
+ )
19
  print(f"🧠 Using device: {device}")
20
 
21
+ from transformers import (
22
+ AutoProcessor,
23
+ AutoModelForZeroShotObjectDetection,
24
+ AutoTokenizer,
25
+ AutoModel
26
+ )
27
  from segment_anything import SamPredictor, sam_model_registry
28
 
29
  print("...Loading Grounding DINO model...")
 
32
  model_gnd = AutoModelForZeroShotObjectDetection.from_pretrained(gnd_model_id).to(device)
33
 
34
  print("...Loading Segment Anything (SAM) model...")
35
+ # IMPORTANT: The path is now relative to the root of the project
36
  sam_checkpoint = "sam_vit_b_01ec64.pth"
37
  sam_model = sam_model_registry["vit_b"](checkpoint=sam_checkpoint).to(device)
38
  predictor = SamPredictor(sam_model)
 
42
  tokenizer_text = AutoTokenizer.from_pretrained(bge_model_id)
43
  model_text = AutoModel.from_pretrained(bge_model_id).to(device)
44
 
45
+ # Store models in a dictionary to pass to logic functions
46
  models = {
47
  "processor_gnd": processor_gnd,
48
  "model_gnd": model_gnd,
 
52
  "device": device
53
  }
54
 
 
 
 
 
 
 
 
 
 
55
  print("✅ All models loaded successfully.")
56
+ print("=" * 50)
57
 
58
+ # Import routes after app and models are defined to avoid circular imports
59
+ from pipeline import routes