linhaotong
committed on
Commit
·
5ceac46
1
Parent(s):
88040a8
update transformers doc
Browse files
README.md
CHANGED
@@ -25,19 +25,31 @@ pip install -e .
|
|
25 |
## Usage
|
26 |
|
27 |
```python
|
28 |
-
from promptda.promptda import PromptDA
|
29 |
-
from promptda.utils.io_wrapper import load_image, load_depth, save_depth
|
|
|
|
|
|
|
30 |
|
31 |
-
|
32 |
-
|
33 |
-
prompt_depth_path = "assets/example_images/arkit_depth.png"
|
34 |
-
image = load_image(image_path).to(DEVICE)
|
35 |
-
prompt_depth = load_depth(prompt_depth_path).to(DEVICE) # 192x256, ARKit LiDAR depth in meters
|
36 |
|
37 |
-
model = PromptDA.from_pretrained("depth-anything/prompt-depth-anything-vits").to(DEVICE).eval()
|
38 |
-
depth = model.predict(image, prompt_depth) # HxW, depth in meters
|
39 |
|
40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
```
|
42 |
|
43 |
## Citation
|
|
|
25 |
## Usage
|
26 |
|
27 |
```python
|
28 |
+
# from promptda.promptda import PromptDA
|
29 |
+
# from promptda.utils.io_wrapper import load_image, load_depth, save_depth
|
30 |
+
import requests
import torch
|
31 |
+
from PIL import Image
|
32 |
+
from transformers import PromptDepthAnythingForDepthEstimation, PromptDepthAnythingImageProcessor
|
33 |
|
34 |
+
url = "https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/image.jpg?raw=true"
|
35 |
+
image = Image.open(requests.get(url, stream=True).raw)
|
|
|
|
|
|
|
36 |
|
|
|
|
|
37 |
|
38 |
+
image_processor = PromptDepthAnythingImageProcessor.from_pretrained("depth-anything/prompt-depth-anything-vits-hf")
|
39 |
+
model = PromptDepthAnythingForDepthEstimation.from_pretrained("depth-anything/prompt-depth-anything-vits-hf")
|
40 |
+
|
41 |
+
prompt_depth_url = "https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/arkit_depth.png?raw=true"
|
42 |
+
prompt_depth = Image.open(requests.get(prompt_depth_url, stream=True).raw)
|
43 |
+
|
44 |
+
inputs = image_processor(images=image, return_tensors="pt", prompt_depth=prompt_depth)
|
45 |
+
with torch.no_grad():
|
46 |
+
outputs = model(**inputs)
|
47 |
+
post_processed_output = image_processor.post_process_depth_estimation(
|
48 |
+
outputs,
|
49 |
+
target_sizes=[(image.height, image.width)],
|
50 |
+
)
|
51 |
+
|
52 |
+
predicted_depth = post_processed_output[0]["predicted_depth"]
|
53 |
```
|
54 |
|
55 |
## Citation
|