update app.py
app.py CHANGED
@@ -78,7 +78,7 @@ This demo is powered by [Gradio](https://gradio.app/) and uses OmniParserv2 to g
 DEVICE = torch.device('cuda')
 
 @spaces.GPU
-
+@torch.inference_mode()
 def get_som_response(instruction, image_som):
     prompt = magma_som_prompt.format(instruction)
     if magam_model.config.mm_use_image_start_end:
@@ -115,7 +115,7 @@ def get_som_response(instruction, image_som):
     return response
 
 @spaces.GPU
-
+@torch.inference_mode()
 def get_qa_response(instruction, image):
     prompt = magma_qa_prompt.format(instruction)
     if magam_model.config.mm_use_image_start_end:
@@ -152,7 +152,7 @@ def get_qa_response(instruction, image):
     return response
 
 @spaces.GPU
-
+@torch.inference_mode()
 # @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
 def process(
     image_input,
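For context, the commit stacks torch.inference_mode() directly under the spaces.GPU decorator on each GPU-backed entry point, so the forward passes run without autograd tracking and therefore use less memory and run faster. Below is a minimal sketch of that decorator pattern; the model and function names are hypothetical and not taken from this repository.

import spaces
import torch

@spaces.GPU                  # ZeroGPU: attach a GPU for the duration of this call
@torch.inference_mode()      # run the body without recording gradients
def generate_response(model, inputs):
    # Hypothetical helper: tensors created here are inference tensors,
    # so no autograd graph is built and activation memory is not retained.
    return model.generate(**inputs, max_new_tokens=128)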