Sutirtha committed on
Commit
94e7ee0
·
verified ·
1 Parent(s): 6227239

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -5
app.py CHANGED
@@ -15,10 +15,9 @@ warnings.filterwarnings("ignore", category=UserWarning)
15
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
16
  print(f"Using device: {device}")
17
 
18
- # Load the LangSAM model and move it to the selected device
19
- model = LangSAM()
20
- model.to(device)
21
- model.eval() # Set model to evaluation mode
22
 
23
  def extract_masks(image_pil, prompts):
24
  """
@@ -35,7 +34,7 @@ def extract_masks(image_pil, prompts):
35
  masks_dict = {}
36
  with torch.no_grad(): # Disable gradient computation for inference
37
  for prompt in prompts_list:
38
- # Ensure the model uses the correct device
39
  masks, boxes, phrases, logits = model.predict(image_pil, prompt)
40
  if masks is not None and len(masks) > 0:
41
  # Move masks to CPU and convert to numpy
 
15
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
16
  print(f"Using device: {device}")
17
 
18
+ # Load the LangSAM model
19
+ model = LangSAM() # Use the default model or specify custom checkpoint if necessary
20
+ # Note: Removed model.to(device) since LangSAM does not support it
 
21
 
22
  def extract_masks(image_pil, prompts):
23
  """
 
34
  masks_dict = {}
35
  with torch.no_grad(): # Disable gradient computation for inference
36
  for prompt in prompts_list:
37
+ # Ensure the model uses the correct device internally
38
  masks, boxes, phrases, logits = model.predict(image_pil, prompt)
39
  if masks is not None and len(masks) > 0:
40
  # Move masks to CPU and convert to numpy