drlon committed on
Commit
cf2f76b
·
1 Parent(s): 7e82084

Convert pixel_values to BFloat16 before moving to CUDA

Browse files
Files changed (1) hide show
  1. app.py +3 -10
app.py CHANGED
@@ -93,21 +93,14 @@ def get_som_response(instruction, image_som):
93
  add_generation_prompt=True
94
  )
95
 
96
- # inputs = magma_processor(images=[image_som], texts=prompt, return_tensors="pt")
97
- # inputs['pixel_values'] = inputs['pixel_values'].unsqueeze(0)
98
- # inputs['image_sizes'] = inputs['image_sizes'].unsqueeze(0)
99
- # inputs = inputs.to("cuda")
100
-
101
  inputs = magma_processor(images=[image_som], texts=prompt, return_tensors="pt")
102
  inputs['pixel_values'] = inputs['pixel_values'].unsqueeze(0)
 
 
103
  inputs['image_sizes'] = inputs['image_sizes'].unsqueeze(0)
104
- # Convert all tensors to BFloat16 before moving to CUDA
105
- for key in inputs:
106
- if torch.is_tensor(inputs[key]):
107
- inputs[key] = inputs[key].to(torch.bfloat16)
108
- # Then move to CUDA device
109
  inputs = inputs.to("cuda")
110
 
 
111
  magam_model.generation_config.pad_token_id = magma_processor.tokenizer.pad_token_id
112
  with torch.inference_mode():
113
  output_ids = magam_model.generate(
 
93
  add_generation_prompt=True
94
  )
95
 
 
 
 
 
 
96
  inputs = magma_processor(images=[image_som], texts=prompt, return_tensors="pt")
97
  inputs['pixel_values'] = inputs['pixel_values'].unsqueeze(0)
98
+ # Convert pixel_values to BFloat16 before moving to CUDA
99
+ inputs['pixel_values'] = inputs['pixel_values'].to(torch.bfloat16)
100
  inputs['image_sizes'] = inputs['image_sizes'].unsqueeze(0)
 
 
 
 
 
101
  inputs = inputs.to("cuda")
102
 
103
+
104
  magam_model.generation_config.pad_token_id = magma_processor.tokenizer.pad_token_id
105
  with torch.inference_mode():
106
  output_ids = magam_model.generate(