Update README.md
README.md (changed)
```diff
@@ -168,7 +168,8 @@ import requests
 from transformers import AutoModelForCausalLM, AutoProcessor
 
 # Load the model and processor
-model = AutoModelForCausalLM.from_pretrained("microsoft/Magma-8B", trust_remote_code=True)
+dtype = torch.bfloat16
+model = AutoModelForCausalLM.from_pretrained("microsoft/Magma-8B", trust_remote_code=True, torch_dtype=dtype)
 processor = AutoProcessor.from_pretrained("microsoft/Magma-8B", trust_remote_code=True)
 model.to("cuda")
 
@@ -185,7 +186,7 @@ prompt = processor.tokenizer.apply_chat_template(convs, tokenize=False, add_generation_prompt=True)
 inputs = processor(images=[image], texts=prompt, return_tensors="pt")
 inputs['pixel_values'] = inputs['pixel_values'].unsqueeze(0)
 inputs['image_sizes'] = inputs['image_sizes'].unsqueeze(0)
-inputs = inputs.to("cuda")
+inputs = inputs.to("cuda").to(dtype)
 
 generation_args = {
     "max_new_tokens": 128,
```