banao-tech committed
Commit 0041f4c · verified · 1 Parent(s): a266906

Update main.py

Files changed (1)
  main.py +12 -13
main.py CHANGED
@@ -36,25 +36,24 @@ from ultralytics import YOLO
 #except:
 #yolo_model = YOLO("best.pt")
 
-from transformers import AutoProcessor, AutoModelForCausalLM
-
-processor = AutoProcessor.from_pretrained(
-    "microsoft/Florence-2-base", trust_remote_code=True
-)
 
+# Correctly load the processor and model for Blip-2
 try:
-    model = AutoModelForCausalLM.from_pretrained(
+    processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
+    model = Blip2ForConditionalGeneration.from_pretrained(
         "microsoft/OmniParser",
-        torch_dtype=torch.float16,
-        trust_remote_code=True,
+        torch_dtype=torch.float16,  # Assuming you're using a GPU
+        trust_remote_code=True
     ).to("cuda")
-except:
-    model = AutoModelForCausalLM.from_pretrained(
+except Exception as e:
+    print(f"Error loading caption model: {e}")
+    processor = AutoProcessor.from_pretrained("microsoft/Florence-2-base", trust_remote_code=True)
+    model = Blip2ForConditionalGeneration.from_pretrained(
         "microsoft/OmniParser",
         torch_dtype=torch.float16,
-        trust_remote_code=True,
-    )
-caption_model_processor = {"processor": processor, "model": model}
+        trust_remote_code=True
+    ).to("cpu")  # Fallback to CPU if CUDA fails
+
 print("finish loading model!!!")
 
 app = FastAPI()
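
For reference, a minimal sketch of how this loading block would have to look to run on its own: the commit drops the old `from transformers import ...` line even though the new code still references AutoProcessor and Blip2ForConditionalGeneration, and it also drops the caption_model_processor dict that the previous version defined. The import line and the retained dict below are assumptions added for illustration, not part of this commit.

import torch
from transformers import AutoProcessor, Blip2ForConditionalGeneration  # assumed import; not added by the commit

try:
    # Processor for the Florence-2 base checkpoint, as in the diff.
    processor = AutoProcessor.from_pretrained(
        "microsoft/Florence-2-base", trust_remote_code=True
    )
    # Caption model loaded in half precision onto the GPU.
    model = Blip2ForConditionalGeneration.from_pretrained(
        "microsoft/OmniParser",
        torch_dtype=torch.float16,
        trust_remote_code=True,
    ).to("cuda")
except Exception as e:
    # Fall back to CPU if the GPU load fails.
    print(f"Error loading caption model: {e}")
    processor = AutoProcessor.from_pretrained(
        "microsoft/Florence-2-base", trust_remote_code=True
    )
    model = Blip2ForConditionalGeneration.from_pretrained(
        "microsoft/OmniParser",
        torch_dtype=torch.float16,
        trust_remote_code=True,
    ).to("cpu")

# Assumed: downstream code still reads this dict (defined in the previous version, removed by this diff).
caption_model_processor = {"processor": processor, "model": model}
print("finish loading model!!!")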