tonywu71 committed · verified
Commit a12a745 · Parent(s): 942a97d

Update README.md
Files changed (1): README.md (+5, -5)
README.md CHANGED

```diff
@@ -65,11 +65,11 @@ from transformers.utils.import_utils import is_flash_attn_2_available
 from colpali_engine.models import ColQwen2, ColQwen2Processor
 
 model = ColQwen2.from_pretrained(
-    "vidore/colqwen2-v1.0",
-    torch_dtype=torch.bfloat16,
-    device_map="cuda:0",  # or "mps" if on Apple Silicon
-    attn_implementation="flash_attention_2" if is_flash_attn_2_available() else None,  # or "eager" if "mps"
-).eval()
+    "vidore/colqwen2-v1.0",
+    torch_dtype=torch.bfloat16,
+    device_map="cuda:0",  # or "mps" if on Apple Silicon
+    attn_implementation="flash_attention_2" if is_flash_attn_2_available() else None,  # or "eager" if "mps"
+).eval()
 processor = ColQwen2Processor.from_pretrained("vidore/colqwen2-v1.0")
 
 # Your inputs
```
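For context, `# Your inputs` marks where the diffed hunk cuts off in the model-card snippet. Below is a minimal sketch of how such a snippet typically continues with the standard colpali_engine usage (process inputs, embed, then late-interaction scoring). The example images and queries are placeholders, not part of this commit:

```python
import torch
from PIL import Image

from colpali_engine.models import ColQwen2, ColQwen2Processor

model = ColQwen2.from_pretrained(
    "vidore/colqwen2-v1.0",
    torch_dtype=torch.bfloat16,
    device_map="cuda:0",  # or "mps" if on Apple Silicon
).eval()
processor = ColQwen2Processor.from_pretrained("vidore/colqwen2-v1.0")

# Your inputs (placeholder images and queries for illustration)
images = [
    Image.new("RGB", (32, 32), color="white"),
    Image.new("RGB", (16, 16), color="black"),
]
queries = [
    "Is attention really all you need?",
    "What is the organizational structure for our R&D department?",
]

# Preprocess the inputs and move them to the model's device
batch_images = processor.process_images(images).to(model.device)
batch_queries = processor.process_queries(queries).to(model.device)

# Forward pass: one multi-vector embedding per image and per query
with torch.no_grad():
    image_embeddings = model(**batch_images)
    query_embeddings = model(**batch_queries)

# Late-interaction (MaxSim) scoring between queries and images
scores = processor.score_multi_vector(query_embeddings, image_embeddings)
```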