drlon committed on
Commit
7e82084
·
1 Parent(s): caf8c91

update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -0
app.py CHANGED
@@ -93,9 +93,19 @@ def get_som_response(instruction, image_som):
93
  add_generation_prompt=True
94
  )
95
 
 
 
 
 
 
96
  inputs = magma_processor(images=[image_som], texts=prompt, return_tensors="pt")
97
  inputs['pixel_values'] = inputs['pixel_values'].unsqueeze(0)
98
  inputs['image_sizes'] = inputs['image_sizes'].unsqueeze(0)
 
 
 
 
 
99
  inputs = inputs.to("cuda")
100
 
101
  magam_model.generation_config.pad_token_id = magma_processor.tokenizer.pad_token_id
 
93
  add_generation_prompt=True
94
  )
95
 
96
+ # inputs = magma_processor(images=[image_som], texts=prompt, return_tensors="pt")
97
+ # inputs['pixel_values'] = inputs['pixel_values'].unsqueeze(0)
98
+ # inputs['image_sizes'] = inputs['image_sizes'].unsqueeze(0)
99
+ # inputs = inputs.to("cuda")
100
+
101
  inputs = magma_processor(images=[image_som], texts=prompt, return_tensors="pt")
102
  inputs['pixel_values'] = inputs['pixel_values'].unsqueeze(0)
103
  inputs['image_sizes'] = inputs['image_sizes'].unsqueeze(0)
104
+ # Convert all tensors to BFloat16 before moving to CUDA
105
+ for key in inputs:
106
+ if torch.is_tensor(inputs[key]):
107
+ inputs[key] = inputs[key].to(torch.bfloat16)
108
+ # Then move to CUDA device
109
  inputs = inputs.to("cuda")
110
 
111
  magam_model.generation_config.pad_token_id = magma_processor.tokenizer.pad_token_id