prithivMLmods committed on
Commit
b22a631
·
verified ·
1 Parent(s): 14e4776

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -51,7 +51,7 @@ from qwen_vl_utils import process_vision_info
51
 
52
  # default: Load the model on the available device(s)
53
  model = Qwen2VLForConditionalGeneration.from_pretrained(
54
- "Qwen/Qwen2-VL-2B-Instruct", torch_dtype="auto", device_map="auto"
55
  )
56
 
57
  # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
@@ -63,7 +63,7 @@ model = Qwen2VLForConditionalGeneration.from_pretrained(
63
  # )
64
 
65
 # default processor
66
- processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
67
 
68
  # The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
69
  # min_pixels = 256*28*28
 
51
 
52
  # default: Load the model on the available device(s)
53
  model = Qwen2VLForConditionalGeneration.from_pretrained(
54
+ "prithivMLmods/Qwen2-VL-OCR-2B-Instruct", torch_dtype="auto", device_map="auto"
55
  )
56
 
57
  # We recommend enabling flash_attention_2 for better acceleration and memory saving, especially in multi-image and video scenarios.
 
63
  # )
64
 
65
 # default processor
66
+ processor = AutoProcessor.from_pretrained("prithivMLmods/Qwen2-VL-OCR-2B-Instruct")
67
 
68
  # The default range for the number of visual tokens per image in the model is 4-16384. You can set min_pixels and max_pixels according to your needs, such as a token count range of 256-1280, to balance speed and memory usage.
69
  # min_pixels = 256*28*28