Pierre Chapuis committed on
Commit
06ee974
·
unverified ·
1 Parent(s): 664105d

rye -> uv + fix pillow_heif issue

Browse files
Files changed (5) hide show
  1. pyproject.toml +8 -6
  2. requirements.lock +0 -253
  3. requirements.txt +2 -2
  4. src/app.py +8 -5
  5. uv.lock +0 -0
pyproject.toml CHANGED
@@ -8,8 +8,8 @@ authors = [
8
  dependencies = [
9
  "gradio>=5.27.1",
10
  "gradio-image-annotation>=0.2.5",
11
- "pillow>=10.4.0",
12
- "pillow-heif>=0.18.0",
13
  "refiners @ git+https://github.com/finegrain-ai/refiners",
14
  "numba>=0.60.0",
15
  "pymatting>=1.1.12",
@@ -24,10 +24,6 @@ requires-python = ">= 3.12, <3.13"
24
  requires = ["hatchling"]
25
  build-backend = "hatchling.build"
26
 
27
- [tool.rye]
28
- managed = true
29
- dev-dependencies = []
30
-
31
  [tool.hatch.metadata]
32
  allow-direct-references = true
33
 
@@ -53,3 +49,9 @@ select = [
53
  [tool.pyright]
54
  include = ["src"]
55
  exclude = ["**/__pycache__"]
 
 
 
 
 
 
 
8
  dependencies = [
9
  "gradio>=5.27.1",
10
  "gradio-image-annotation>=0.2.5",
11
+ "pillow>=11.3.0",
12
+ "pi-heif>=1.1.0",
13
  "refiners @ git+https://github.com/finegrain-ai/refiners",
14
  "numba>=0.60.0",
15
  "pymatting>=1.1.12",
 
24
  requires = ["hatchling"]
25
  build-backend = "hatchling.build"
26
 
 
 
 
 
27
  [tool.hatch.metadata]
28
  allow-direct-references = true
29
 
 
49
  [tool.pyright]
50
  include = ["src"]
51
  exclude = ["**/__pycache__"]
52
+
53
+ [dependency-groups]
54
+ dev = [
55
+ "pyright>=1.1.404",
56
+ "ruff>=0.12.10",
57
+ ]
requirements.lock DELETED
@@ -1,253 +0,0 @@
1
- # generated by rye
2
- # use `rye lock` or `rye sync` to update this lockfile
3
- #
4
- # last locked with the following flags:
5
- # pre: false
6
- # features: []
7
- # all-features: false
8
- # with-sources: false
9
- # generate-hashes: false
10
- # universal: false
11
-
12
- -e file:.
13
- aiofiles==24.1.0
14
- # via gradio
15
- annotated-types==0.7.0
16
- # via pydantic
17
- anyio==4.9.0
18
- # via gradio
19
- # via httpx
20
- # via starlette
21
- certifi==2025.4.26
22
- # via httpcore
23
- # via httpx
24
- # via requests
25
- charset-normalizer==3.4.1
26
- # via requests
27
- click==8.1.8
28
- # via typer
29
- # via uvicorn
30
- fastapi==0.115.12
31
- # via gradio
32
- ffmpy==0.5.0
33
- # via gradio
34
- filelock==3.18.0
35
- # via huggingface-hub
36
- # via torch
37
- # via transformers
38
- fsspec==2025.3.2
39
- # via gradio-client
40
- # via huggingface-hub
41
- # via torch
42
- gradio==5.27.1
43
- # via cutter
44
- # via gradio-image-annotation
45
- # via spaces
46
- gradio-client==1.9.1
47
- # via gradio
48
- gradio-image-annotation==0.2.6
49
- # via cutter
50
- groovy==0.1.2
51
- # via gradio
52
- h11==0.16.0
53
- # via httpcore
54
- # via uvicorn
55
- httpcore==1.0.9
56
- # via httpx
57
- httpx==0.28.1
58
- # via gradio
59
- # via gradio-client
60
- # via safehttpx
61
- # via spaces
62
- huggingface-hub==0.30.2
63
- # via gradio
64
- # via gradio-client
65
- # via tokenizers
66
- # via transformers
67
- idna==3.10
68
- # via anyio
69
- # via httpx
70
- # via requests
71
- jaxtyping==0.3.2
72
- # via refiners
73
- jinja2==3.1.6
74
- # via gradio
75
- # via torch
76
- llvmlite==0.44.0
77
- # via numba
78
- markdown-it-py==3.0.0
79
- # via rich
80
- markupsafe==3.0.2
81
- # via gradio
82
- # via jinja2
83
- mdurl==0.1.2
84
- # via markdown-it-py
85
- mpmath==1.3.0
86
- # via sympy
87
- networkx==3.4.2
88
- # via torch
89
- numba==0.61.2
90
- # via cutter
91
- # via pymatting
92
- numpy==1.26.4
93
- # via cutter
94
- # via gradio
95
- # via numba
96
- # via pandas
97
- # via pymatting
98
- # via refiners
99
- # via scipy
100
- # via transformers
101
- nvidia-cublas-cu12==12.6.4.1
102
- # via nvidia-cudnn-cu12
103
- # via nvidia-cusolver-cu12
104
- # via torch
105
- nvidia-cuda-cupti-cu12==12.6.80
106
- # via torch
107
- nvidia-cuda-nvrtc-cu12==12.6.77
108
- # via torch
109
- nvidia-cuda-runtime-cu12==12.6.77
110
- # via torch
111
- nvidia-cudnn-cu12==9.5.1.17
112
- # via torch
113
- nvidia-cufft-cu12==11.3.0.4
114
- # via torch
115
- nvidia-cufile-cu12==1.11.1.6
116
- # via torch
117
- nvidia-curand-cu12==10.3.7.77
118
- # via torch
119
- nvidia-cusolver-cu12==11.7.1.2
120
- # via torch
121
- nvidia-cusparse-cu12==12.5.4.2
122
- # via nvidia-cusolver-cu12
123
- # via torch
124
- nvidia-cusparselt-cu12==0.6.3
125
- # via torch
126
- nvidia-nccl-cu12==2.26.2
127
- # via torch
128
- nvidia-nvjitlink-cu12==12.6.85
129
- # via nvidia-cufft-cu12
130
- # via nvidia-cusolver-cu12
131
- # via nvidia-cusparse-cu12
132
- # via torch
133
- nvidia-nvtx-cu12==12.6.77
134
- # via torch
135
- orjson==3.10.16
136
- # via gradio
137
- packaging==25.0
138
- # via gradio
139
- # via gradio-client
140
- # via huggingface-hub
141
- # via refiners
142
- # via spaces
143
- # via transformers
144
- pandas==2.2.3
145
- # via gradio
146
- pillow==11.2.1
147
- # via cutter
148
- # via gradio
149
- # via pillow-heif
150
- # via pymatting
151
- # via refiners
152
- pillow-heif==0.22.0
153
- # via cutter
154
- psutil==5.9.8
155
- # via spaces
156
- pydantic==2.11.3
157
- # via fastapi
158
- # via gradio
159
- # via spaces
160
- pydantic-core==2.33.1
161
- # via pydantic
162
- pydub==0.25.1
163
- # via gradio
164
- pygments==2.19.1
165
- # via rich
166
- pymatting==1.1.13
167
- # via cutter
168
- python-dateutil==2.9.0.post0
169
- # via pandas
170
- python-multipart==0.0.20
171
- # via gradio
172
- pytz==2025.2
173
- # via pandas
174
- pyyaml==6.0.2
175
- # via gradio
176
- # via huggingface-hub
177
- # via transformers
178
- refiners @ git+https://github.com/finegrain-ai/refiners@cfe8b66ba4f8a906583850ac25e9e89cb83a44b9
179
- # via cutter
180
- regex==2024.11.6
181
- # via transformers
182
- requests==2.32.3
183
- # via huggingface-hub
184
- # via spaces
185
- # via transformers
186
- rich==14.0.0
187
- # via typer
188
- ruff==0.11.7
189
- # via gradio
190
- safehttpx==0.1.6
191
- # via gradio
192
- safetensors==0.5.3
193
- # via refiners
194
- # via transformers
195
- scipy==1.15.2
196
- # via pymatting
197
- semantic-version==2.10.0
198
- # via gradio
199
- setuptools==80.0.0
200
- # via torch
201
- # via triton
202
- shellingham==1.5.4
203
- # via typer
204
- six==1.17.0
205
- # via python-dateutil
206
- sniffio==1.3.1
207
- # via anyio
208
- spaces==0.35.0
209
- # via cutter
210
- starlette==0.46.2
211
- # via fastapi
212
- # via gradio
213
- sympy==1.14.0
214
- # via torch
215
- tokenizers==0.21.1
216
- # via transformers
217
- tomlkit==0.13.2
218
- # via gradio
219
- torch==2.7.0
220
- # via refiners
221
- tqdm==4.67.1
222
- # via huggingface-hub
223
- # via transformers
224
- transformers==4.51.3
225
- # via cutter
226
- triton==3.3.0
227
- # via torch
228
- typer==0.15.3
229
- # via gradio
230
- typing-extensions==4.13.2
231
- # via anyio
232
- # via fastapi
233
- # via gradio
234
- # via gradio-client
235
- # via huggingface-hub
236
- # via pydantic
237
- # via pydantic-core
238
- # via spaces
239
- # via torch
240
- # via typer
241
- # via typing-inspection
242
- typing-inspection==0.4.0
243
- # via pydantic
244
- tzdata==2025.2
245
- # via pandas
246
- urllib3==2.4.0
247
- # via requests
248
- uvicorn==0.34.2
249
- # via gradio
250
- wadler-lindig==0.1.5
251
- # via jaxtyping
252
- websockets==15.0.1
253
- # via gradio-client
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,7 +1,7 @@
1
  spaces>=0.35.0
2
  gradio_image_annotation>=0.2.5
3
- pillow>=10.4.0
4
- pillow-heif>=0.18.0
5
  git+https://github.com/finegrain-ai/refiners@cfe8b66ba4f8a906583850ac25e9e89cb83a44b9
6
  numba>=0.60.0
7
  pymatting>=1.1.12
 
1
  spaces>=0.35.0
2
  gradio_image_annotation>=0.2.5
3
+ pillow>=11.3.0
4
+ pi_heif>=1.1.0
5
  git+https://github.com/finegrain-ai/refiners@cfe8b66ba4f8a906583850ac25e9e89cb83a44b9
6
  numba>=0.60.0
7
  pymatting>=1.1.12
src/app.py CHANGED
@@ -5,7 +5,7 @@ from typing import Any, cast
5
 
6
  import gradio as gr
7
  import numpy as np
8
- import pillow_heif
9
  import spaces
10
  import torch
11
  from gradio_image_annotation import image_annotator
@@ -17,8 +17,7 @@ from transformers import GroundingDinoForObjectDetection, GroundingDinoProcessor
17
 
18
  BoundingBox = tuple[int, int, int, int]
19
 
20
- pillow_heif.register_heif_opener()
21
- pillow_heif.register_avif_opener()
22
 
23
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
24
 
@@ -64,7 +63,7 @@ def gd_detect(img: Image.Image, prompt: str) -> BoundingBox | None:
64
  width, height = img.size
65
  results: dict[str, Any] = gd_processor.post_process_grounded_object_detection(
66
  outputs,
67
- inputs["input_ids"],
68
  target_sizes=[(height, width)],
69
  )[0]
70
  assert "boxes" in results and isinstance(results["boxes"], torch.Tensor)
@@ -120,6 +119,10 @@ def _gpu_process(
120
  return mask, bbox, time_log
121
 
122
 
 
 
 
 
123
  def _process(
124
  img: Image.Image,
125
  prompt: str | BoundingBox | None,
@@ -141,7 +144,7 @@ def _process(
141
 
142
  masked_rgb = Image.alpha_composite(Image.new("RGBA", masked_alpha.size, "white"), masked_alpha)
143
 
144
- thresholded = mask.point(lambda p: 255 if p > 10 else 0)
145
  bbox = thresholded.getbbox()
146
  to_dl = masked_alpha.crop(bbox)
147
 
 
5
 
6
  import gradio as gr
7
  import numpy as np
8
+ import pi_heif
9
  import spaces
10
  import torch
11
  from gradio_image_annotation import image_annotator
 
17
 
18
  BoundingBox = tuple[int, int, int, int]
19
 
20
+ pi_heif.register_heif_opener()
 
21
 
22
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
23
 
 
63
  width, height = img.size
64
  results: dict[str, Any] = gd_processor.post_process_grounded_object_detection(
65
  outputs,
66
+ inputs["input_ids"], # type: ignore
67
  target_sizes=[(height, width)],
68
  )[0]
69
  assert "boxes" in results and isinstance(results["boxes"], torch.Tensor)
 
119
  return mask, bbox, time_log
120
 
121
 
122
+ def _thresh(p: int) -> float:
123
+ return 255.0 if p > 10 else 0.0
124
+
125
+
126
  def _process(
127
  img: Image.Image,
128
  prompt: str | BoundingBox | None,
 
144
 
145
  masked_rgb = Image.alpha_composite(Image.new("RGBA", masked_alpha.size, "white"), masked_alpha)
146
 
147
+ thresholded = mask.point(_thresh)
148
  bbox = thresholded.getbbox()
149
  to_dl = masked_alpha.crop(bbox)
150
 
uv.lock ADDED
The diff for this file is too large to render. See raw diff