lixinhao committed
Commit c17ed00 · verified · 1 Parent(s): 816c629

Update README.md

Files changed (1)
1. README.md +2 -1
README.md CHANGED
@@ -109,12 +109,13 @@ pip install flash-attn --no-build-isolation
 Then you could use our model:
 ```python
 from transformers import AutoModel, AutoTokenizer
+import torch
 
 # model setting
 model_path = 'OpenGVLab/VideoChat-Flash-Qwen2-7B_res448'
 
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
-model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda()
+model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(torch.bfloat16).cuda()
 image_processor = model.get_vision_tower().image_processor
 
 mm_llm_compress = False # use the global compress or not
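
For reference, the loading snippet as it reads after this commit is sketched below with one defensive addition that is not in the README: a dtype fallback via torch.cuda.is_bf16_supported(), an assumption for GPUs without native bfloat16 support, where float16 reproduces the pre-commit .half() behavior.

```python
import torch
from transformers import AutoModel, AutoTokenizer

# model setting
model_path = 'OpenGVLab/VideoChat-Flash-Qwen2-7B_res448'

# bfloat16 (the dtype this commit switches to) needs hardware support;
# the fallback to float16 below is an added assumption, not part of the README.
dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(dtype).cuda()
image_processor = model.get_vision_tower().image_processor

mm_llm_compress = False  # use the global compress or not
```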