Update with md5sum and half precision inference
Files changed:
- app.py (+1, -0)
- label_prettify.py (+1, -0)
- prismer/configs/experts.yaml (+1, -1)
- prismer_model.py (+5, -4)
app.py CHANGED

@@ -20,6 +20,7 @@ description = """
 # Prismer
 The official demo for **Prismer: A Vision-Language Model with An Ensemble of Experts**.
 Please refer to our [project page](https://shikun.io/projects/prismer) or [github](https://github.com/NVlabs/prismer) for more details.
+Expert labels are computed only once per image, identified by its md5sum.
 """
 
 if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
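For context, the md5-based deduplication the new description line refers to can be sketched as follows. This is an illustrative sketch only: the helper names and the cache layout under `helpers/labels` are assumptions, not code from this commit.

```python
# Hypothetical sketch of md5-keyed caching of expert labels (not from this commit).
# Assumes each expert writes its output named after the image's md5 hash, so a
# repeated upload of the same image can skip re-running the experts.
import hashlib
import pathlib


def image_md5(image_path: str) -> str:
    # Hash the raw file bytes so identical uploads map to the same key.
    return hashlib.md5(pathlib.Path(image_path).read_bytes()).hexdigest()


def labels_already_computed(image_path: str, label_root: str = 'helpers/labels') -> bool:
    # Assumed layout: label files are named <md5>.<ext> somewhere under label_root.
    im_name = image_md5(image_path)
    return any(pathlib.Path(label_root).rglob(f'{im_name}.*'))
```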
label_prettify.py CHANGED

@@ -8,6 +8,7 @@ import numpy as np
 import shutil
 
 from prismer.utils import create_ade20k_label_colormap
+matplotlib.use('agg')
 
 obj_label_map = torch.load('prismer/dataset/detection_features.pt')['labels']
 coco_label_map = torch.load('prismer/dataset/coco_features.pt')['labels']
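The new `matplotlib.use('agg')` call selects the non-interactive Agg backend, which renders figures to memory or files without a display server, as needed in a headless Space. The backend should be set before pyplot starts rendering. A minimal standalone illustration of the pattern (not project code):

```python
# Standalone illustration of forcing the non-interactive Agg backend (not from this repo).
import matplotlib
matplotlib.use('agg')            # select the backend before pyplot renders anything
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
fig.savefig('/tmp/plot.png')     # renders off-screen; no display server required
plt.close(fig)
```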
prismer/configs/experts.yaml CHANGED

@@ -1,3 +1,3 @@
 data_path: helpers
-im_name:
+im_name: ca2a8e1305af24483124b85c53bd24b3
 save_path: helpers/labels
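The checked-in `im_name` value appears to be the md5 of the most recently processed image; at runtime `run_experts` overwrites it (see `prismer_model.py` below). How the expert scripts consume these three fields is not shown in this excerpt; the reader below is a plausible sketch, with the file-name convention (md5 plus `.jpg`) assumed rather than taken from the repo.

```python
# Hypothetical consumer of prismer/configs/experts.yaml (illustrative only).
import pathlib
import yaml

with open('prismer/configs/experts.yaml') as f:
    config = yaml.load(f, Loader=yaml.Loader)

# Assumed convention: the input image is stored under data_path named by its md5,
# and every expert writes its labels under save_path.
image_path = pathlib.Path(config['data_path']) / f"{config['im_name']}.jpg"
label_dir = pathlib.Path(config['save_path'])
label_dir.mkdir(parents=True, exist_ok=True)
print(image_path, label_dir)
```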
prismer_model.py CHANGED

@@ -34,10 +34,10 @@ def download_models() -> None:
     subprocess.run(shlex.split('python download_checkpoints.py --download_experts=True'), cwd='prismer')
 
     model_names = [
-        'vqa_prismer_base',
-        'vqa_prismer_large',
+        # 'vqa_prismer_base',
+        # 'vqa_prismer_large',
         'pretrain_prismer_base',
-        'pretrain_prismer_large',
+        # 'pretrain_prismer_large',
     ]
     for model_name in model_names:
         if pathlib.Path(f'prismer/logging/{model_name}').exists():
@@ -78,6 +78,7 @@ def run_experts(image_path: str) -> Tuple[str, Tuple[str, ...]]:
 
     config = yaml.load(open('prismer/configs/experts.yaml', 'r'), Loader=yaml.Loader)
     config['im_name'] = im_name
+    print(im_name)
     with open('prismer/configs/experts.yaml', 'w') as yaml_file:
         yaml.dump(config, yaml_file, default_flow_style=False)
 
@@ -89,7 +90,7 @@ def run_experts(image_path: str) -> Tuple[str, Tuple[str, ...]]:
     run_expert('depth')
     with concurrent.futures.ProcessPoolExecutor() as executor:
         executor.map(run_expert, expert_names)
-
+        executor.shutdown(wait=True)
 
     # no parallelization just to be safe
     # expert_names = ['depth', 'edge', 'normal', 'objdet', 'ocrdet', 'segmentation']
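The commit title also mentions half precision inference, but that change is not visible in the hunks above. For reference, half-precision inference in PyTorch is typically done either by casting the model and inputs to fp16 or by wrapping the forward pass in autocast; the snippet below is a generic sketch under those assumptions, not the code this Space uses.

```python
# Generic fp16 inference sketch (illustrative; not taken from prismer_model.py).
import torch


@torch.no_grad()
def infer_fp16(model: torch.nn.Module, image: torch.Tensor) -> torch.Tensor:
    # Cast the weights and the input to half precision and run on GPU.
    model = model.half().cuda().eval()
    return model(image.half().cuda())


@torch.no_grad()
def infer_autocast(model: torch.nn.Module, image: torch.Tensor) -> torch.Tensor:
    # Alternative: keep fp32 weights and let autocast pick fp16 kernels where safe.
    model = model.cuda().eval()
    with torch.autocast(device_type='cuda', dtype=torch.float16):
        return model(image.cuda())
```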