Spaces: Running on Zero

Commit · 48acb1d
1 Parent(s): 76fcb7a
Add two more model options, update the torch version to 2.5.1, and update app_local.py.
Files changed:
- app.py +6 -2
- app_local.py +7 -3
- requirements.txt +2 -2
app.py CHANGED

@@ -74,9 +74,11 @@ class ImagePreprocessor():
 usage_to_weights_file = {
     'General': 'BiRefNet',
     'General-HR': 'BiRefNet_HR',
+    'General-reso_512': 'BiRefNet-reso_512',
     'General-Lite': 'BiRefNet_lite',
     'General-Lite-2K': 'BiRefNet_lite-2K',
     'Matting': 'BiRefNet-matting',
+    'Matting-HR': 'BiRefNet_HR-matting',
     'Portrait': 'BiRefNet-portrait',
     'DIS': 'BiRefNet-DIS5K',
     'HRSOD': 'BiRefNet-HRSOD',
@@ -105,10 +107,12 @@ def predict(images, resolution, weights_file):
     try:
         resolution = [int(int(reso)//32*32) for reso in resolution.strip().split('x')]
     except:
-        if weights_file
+        if weights_file in ['General-HR', 'Matting-HR']:
             resolution = (2048, 2048)
-        elif weights_file
+        elif weights_file in ['General-Lite-2K']:
             resolution = (2560, 1440)
+        elif weights_file in ['General-reso_512']:
+            resolution = (512, 512)
         else:
             resolution = (1024, 1024)
             print('Invalid resolution input. Automatically changed to 1024x1024 / 2048x2048 / 2560x1440.')
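The fallback branch in the hunk above is easiest to read as a standalone helper. The following is a minimal sketch, assuming the surrounding predict() behaves as the diff suggests; the helper name resolve_resolution and its signature are illustrative, not taken from the app. A 'WxH' string is snapped down to multiples of 32, and any parse failure falls back to a per-checkpoint default, including the new 512x512 and 2048x2048 cases.

def resolve_resolution(resolution, weights_file):
    # Illustrative sketch, not the app's exact code.
    try:
        # '1000x700' -> (992, 672): each side is rounded down to a multiple of 32.
        w, h = (int(int(v) // 32 * 32) for v in resolution.strip().split('x'))
        return (w, h)
    except Exception:
        # Invalid or empty input: pick a default suited to the selected weights.
        if weights_file in ['General-HR', 'Matting-HR']:
            return (2048, 2048)
        elif weights_file in ['General-Lite-2K']:
            return (2560, 1440)
        elif weights_file in ['General-reso_512']:
            return (512, 512)
        print('Invalid resolution input. Automatically changed to 1024x1024 / 2048x2048 / 2560x1440.')
        return (1024, 1024)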
app_local.py CHANGED

@@ -74,9 +74,11 @@ class ImagePreprocessor():
 usage_to_weights_file = {
     'General': 'BiRefNet',
     'General-HR': 'BiRefNet_HR',
+    'General-reso_512': 'BiRefNet-reso_512',
     'General-Lite': 'BiRefNet_lite',
     'General-Lite-2K': 'BiRefNet_lite-2K',
     'Matting': 'BiRefNet-matting',
+    'Matting-HR': 'BiRefNet_HR-matting',
     'Portrait': 'BiRefNet-portrait',
     'DIS': 'BiRefNet-DIS5K',
     'HRSOD': 'BiRefNet-HRSOD',
@@ -105,10 +107,12 @@ def predict(images, resolution, weights_file):
     try:
         resolution = [int(int(reso)//32*32) for reso in resolution.strip().split('x')]
     except:
-        if weights_file
+        if weights_file in ['General-HR', 'Matting-HR']:
             resolution = (2048, 2048)
-        elif weights_file
+        elif weights_file in ['General-Lite-2K']:
             resolution = (2560, 1440)
+        elif weights_file in ['General-reso_512']:
+            resolution = (512, 512)
         else:
             resolution = (1024, 1024)
             print('Invalid resolution input. Automatically changed to 1024x1024 / 2048x2048 / 2560x1440.')
@@ -182,7 +186,7 @@ for idx_example_url, example_url in enumerate(examples_url):
     examples_url[idx_example_url].append('1024x1024')
 
 descriptions = ('Upload a picture, our model will extract a highly accurate segmentation of the subject in it.\n)'
-                ' The resolution used in our training was `1024x1024`,
+                ' The resolution used in our training was `1024x1024`, which is the suggested resolution to obtain good results! `2048x2048` is suggested for BiRefNet_HR.\n'
                 ' Our codes can be found at https://github.com/ZhengPeng7/BiRefNet.\n'
                 ' We also maintain the HF model of BiRefNet at https://huggingface.co/ZhengPeng7/BiRefNet for easier access.')
 
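For context on the two new dictionary entries: each value in usage_to_weights_file names a BiRefNet checkpoint on the Hugging Face Hub, and the BiRefNet model card loads such checkpoints through transformers with remote code enabled. Below is a minimal sketch, assuming the repo ids follow the existing 'ZhengPeng7/<weights_file>' pattern; the demo's real loading code may differ.

from transformers import AutoModelForImageSegmentation

usage_to_weights_file = {
    'General': 'BiRefNet',
    'General-HR': 'BiRefNet_HR',
    'General-reso_512': 'BiRefNet-reso_512',  # added in this commit
    'Matting-HR': 'BiRefNet_HR-matting',      # added in this commit
}

def load_birefnet(usage):
    # Map the UI option to a checkpoint name, then pull it from the Hub.
    weights_file = usage_to_weights_file[usage]
    # trust_remote_code=True is needed because BiRefNet ships its modeling code with the checkpoint.
    return AutoModelForImageSegmentation.from_pretrained(
        'ZhengPeng7/' + weights_file, trust_remote_code=True
    )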
requirements.txt CHANGED

@@ -1,5 +1,5 @@
-torch==2.
-torchvision
+torch==2.5.1
+torchvision
 numpy<2
 opencv-python
 tqdm
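Since the pin moves to torch 2.5.1, a quick runtime check can confirm the installed build matches the new requirements.txt before loading any weights; this is an optional sanity check, not part of the Space.

import torch, torchvision

# Expected to start with '2.5.1' (possibly with a CUDA suffix such as '+cu121') under the new pin.
print(torch.__version__, torchvision.__version__)
assert torch.__version__.startswith('2.5.1'), 'requirements.txt pins torch==2.5.1'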