Update README.md
Browse files

README.md — CHANGED

@@ -13,4 +13,85 @@ widget:
       example_title: Mammouths
     - src: examples/synth.png
       example_title: Synth
 ---
      example_title: Mammouths
    - src: examples/synth.png
      example_title: Synth
---

# Model card for REVA-QCAV

## Model Usage

### Object Detection (using `transformers`)

```python
from transformers import AutoImageProcessor, AutoModelForObjectDetection
from huggingface_hub import hf_hub_download
from PIL import Image
import torch

# download example image
img_path = hf_hub_download(repo_id="1aurent/REVA-QCAV", filename="examples/chevaux.jpg")
img = Image.open(img_path)

# transform image using image_processor
image_processor = AutoImageProcessor.from_pretrained("1aurent/REVA-QCAV")
data = image_processor(img, return_tensors="pt")

# get outputs from the model
model = AutoModelForObjectDetection.from_pretrained("1aurent/REVA-QCAV")
with torch.no_grad():
    output = model(**data)

# use image_processor post processing
img_CHW = torch.tensor([img.height, img.width]).unsqueeze(0)
output_processed = image_processor.post_process_object_detection(output, threshold=0.9, target_sizes=img_CHW)
```

### Object Detection (using `onnxruntime`)

```python
from transformers.models.detr.modeling_detr import DetrObjectDetectionOutput
from transformers import AutoImageProcessor
from huggingface_hub import hf_hub_download
import onnxruntime as ort
from PIL import Image
import torch

# download onnx and start inference session
onnx_path = hf_hub_download(repo_id="1aurent/REVA-QCAV", filename="model.onnx")
session = ort.InferenceSession(onnx_path)

# download example image
img_path = hf_hub_download(repo_id="1aurent/REVA-QCAV", filename="examples/chevaux.jpg")
img = Image.open(img_path)

# transform image using image_processor
image_processor = AutoImageProcessor.from_pretrained("1aurent/REVA-QCAV")
data = image_processor(img, return_tensors="np").data

# get logits and bbox predictions using onnx session
logits, pred_boxes = session.run(
    output_names=["logits", "pred_boxes"],
    input_feed=data,
)

# wrap outputs inside DetrObjectDetectionOutput
output = DetrObjectDetectionOutput(
    logits=torch.tensor(logits),
    pred_boxes=torch.tensor(pred_boxes),
)

# use image_processor post processing
img_CHW = torch.tensor([img.height, img.width]).unsqueeze(0)
output_processed = image_processor.post_process_object_detection(output, threshold=0.9, target_sizes=img_CHW)
```

## Citation

```bibtex
@article{reva-qcav,
  author  = {Laurent Fainsin and Jean Mélou and Lilian Calvet and Antoine Laurent and Axel Carlier and Jean-Denis Durou},
  title   = {Neural sphere detection in images for lighting calibration},
  journal = {QCAV},
  year    = {2023},
  url     = {https://hal.science/hal-04160733}
}
```
|