rawalkhirodkar commited on
Commit
69445cc
·
0 Parent(s):

Add initial commit

Browse files
.gitattributes ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.pt2 filter=lfs diff=lfs merge=lfs -text
37
+ *.png filter=lfs diff=lfs merge=lfs -text
38
+ *.jpg filter=lfs diff=lfs merge=lfs -text
39
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
40
+ *.gif filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Sapiens Normal
3
+ emoji: 💻
4
+ colorFrom: green
5
+ colorTo: pink
6
+ sdk: gradio
7
+ sdk_version: 4.42.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: cc-by-nc-4.0
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,333 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import cv2
3
+ import gradio as gr
4
+ import numpy as np
5
+ import spaces
6
+ import torch
7
+ import torch.nn.functional as F
8
+ from gradio.themes.utils import sizes
9
+ from PIL import Image
10
+ from torchvision import transforms
11
+ import tempfile
12
+
13
class Config:
    """Static configuration: asset locations and the available model checkpoints."""

    # All demo assets (checkpoints, example images) live in ./assets next to this file.
    ASSETS_DIR = os.path.join(os.path.dirname(__file__), 'assets')
    CHECKPOINTS_DIR = os.path.join(ASSETS_DIR, "checkpoints")
    # Normal-estimation TorchScript checkpoints, keyed by model size (dropdown labels).
    CHECKPOINTS = {
        "0.3b": "sapiens_0.3b_normal_render_people_epoch_66_torchscript.pt2",
        "0.6b": "sapiens_0.6b_normal_render_people_epoch_200_torchscript.pt2",
        "1b": "sapiens_1b_normal_render_people_epoch_115_torchscript.pt2",
        "2b": "sapiens_2b_normal_render_people_epoch_70_torchscript.pt2",
    }
    # Background-removal / segmentation checkpoints; None means "skip segmentation".
    SEG_CHECKPOINTS = {
        "fg-bg-1b (recommended)": "sapiens_1b_seg_foreground_epoch_8_torchscript.pt2",
        "no-bg-removal": None,
        "part-seg-1b": "sapiens_1b_goliath_best_goliath_mIoU_7994_epoch_151_torchscript.pt2",
    }
28
class ModelManager:
    """Loads TorchScript checkpoints and runs them, resizing outputs to a target size."""

    @staticmethod
    def load_model(checkpoint_name: str):
        """Load a TorchScript model from Config.CHECKPOINTS_DIR.

        Args:
            checkpoint_name: File name of the .pt2 checkpoint, or None
                (e.g. the "no-bg-removal" segmentation option).

        Returns:
            The loaded model in eval mode, or None if checkpoint_name is None.
        """
        if checkpoint_name is None:
            return None
        checkpoint_path = os.path.join(Config.CHECKPOINTS_DIR, checkpoint_name)
        model = torch.jit.load(checkpoint_path)
        model.eval()
        # Fall back to CPU when CUDA is unavailable; the original unconditional
        # .to("cuda") crashed on CPU-only hosts. Behavior on GPU hosts is unchanged.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model.to(device)
        return model

    @staticmethod
    @torch.inference_mode()
    def run_model(model, input_tensor, height, width):
        """Run `model` on `input_tensor` and bilinearly resize the output to (height, width)."""
        output = model(input_tensor)
        return F.interpolate(output, size=(height, width), mode="bilinear", align_corners=False)
45
class ImageProcessor:
    """Preprocesses input images and runs normal estimation plus optional background removal."""

    def __init__(self):
        # Resize to the models' expected resolution and normalize
        # (mean/std are the usual mmseg pixel statistics divided by 255).
        self.transform_fn = transforms.Compose([
            transforms.Resize((1024, 768)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[123.5/255, 116.5/255, 103.5/255], std=[58.5/255, 57.0/255, 57.5/255]),
        ])

    @spaces.GPU
    def process_image(self, image: Image.Image, normal_model_name: str, seg_model_name: str):
        """Run normal estimation on `image`.

        Args:
            image: Input PIL image.
            normal_model_name: Key into Config.CHECKPOINTS.
            seg_model_name: Key into Config.SEG_CHECKPOINTS ("no-bg-removal" skips masking).

        Returns:
            (visualization PIL image, path to a .npy file with the raw normal map).
            With segmentation enabled, background pixels are NaN in the .npy
            and rendered black in the visualization.
        """
        # Load models per request instead of caching on the instance
        # (required so GPU allocation happens inside the @spaces.GPU call).
        normal_model = ModelManager.load_model(Config.CHECKPOINTS[normal_model_name])
        # Match ModelManager's device choice; falls back to CPU without CUDA.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        input_tensor = self.transform_fn(image).unsqueeze(0).to(device)

        # Normal estimation, resized back to the original image resolution.
        normal_output = ModelManager.run_model(normal_model, input_tensor, image.height, image.width)
        normal_map = normal_output.squeeze().cpu().numpy().transpose(1, 2, 0)

        # Separate copy for visualization so the raw map can carry NaNs.
        normal_map_vis = normal_map.copy()

        # Optional background removal via the segmentation model.
        if seg_model_name != "no-bg-removal":
            seg_model = ModelManager.load_model(Config.SEG_CHECKPOINTS[seg_model_name])
            seg_output = ModelManager.run_model(seg_model, input_tensor, image.height, image.width)
            # Any non-background class counts as foreground.
            seg_mask = (seg_output.argmax(dim=1) > 0).float().cpu().numpy()[0]

            normal_map[seg_mask == 0] = np.nan  # Set background to NaN for NPY file
            normal_map_vis[seg_mask == 0] = -1  # Set background to -1 for visualization

        # Normalize and visualize normal map
        normal_map_vis = self.visualize_normal_map(normal_map_vis)

        # Create the downloadable .npy file. tempfile.mktemp is deprecated and
        # race-prone (the name can be claimed between creation and use);
        # NamedTemporaryFile(delete=False) creates the file atomically.
        with tempfile.NamedTemporaryFile(suffix='.npy', delete=False) as tmp:
            npy_path = tmp.name
            np.save(tmp, normal_map)

        return Image.fromarray(normal_map_vis), npy_path

    @staticmethod
    def visualize_normal_map(normal_map):
        """Map normals from [-1, 1] to an RGB uint8 image after unit-normalizing."""
        normal_map_norm = np.linalg.norm(normal_map, axis=-1, keepdims=True)
        normal_map_normalized = normal_map / (normal_map_norm + 1e-5)  # avoid divide-by-zero
        normal_map_vis = ((normal_map_normalized + 1) / 2 * 255).astype(np.uint8)
        return normal_map_vis
92
class GradioInterface:
    """Builds the Gradio Blocks UI and wires it to ImageProcessor."""

    def __init__(self):
        # Single processor instance shared by all requests.
        self.image_processor = ImageProcessor()

    def create_interface(self):
        """Assemble and return the gr.Blocks demo.

        Layout: a styled HTML header, an input column (image + model dropdowns
        + example gallery), an output column (result image + .npy download),
        and a Run button that triggers processing.
        """
        # Dark-theme CSS, injected into the page through the header HTML below.
        app_styles = """
        <style>
            /* Global Styles */
            body, #root {
                font-family: Helvetica, Arial, sans-serif;
                background-color: #1a1a1a;
                color: #fafafa;
            }

            /* Header Styles */
            .app-header {
                background: linear-gradient(45deg, #1a1a1a 0%, #333333 100%);
                padding: 24px;
                border-radius: 8px;
                margin-bottom: 24px;
                text-align: center;
            }

            .app-title {
                font-size: 48px;
                margin: 0;
                color: #fafafa;
            }

            .app-subtitle {
                font-size: 24px;
                margin: 8px 0 16px;
                color: #fafafa;
            }

            .app-description {
                font-size: 16px;
                line-height: 1.6;
                opacity: 0.8;
                margin-bottom: 24px;
            }

            /* Button Styles */
            .publication-links {
                display: flex;
                justify-content: center;
                flex-wrap: wrap;
                gap: 8px;
                margin-bottom: 16px;
            }

            .publication-link {
                display: inline-flex;
                align-items: center;
                padding: 8px 16px;
                background-color: #333;
                color: #fff !important;
                text-decoration: none !important;
                border-radius: 20px;
                font-size: 14px;
                transition: background-color 0.3s;
            }

            .publication-link:hover {
                background-color: #555;
            }

            .publication-link i {
                margin-right: 8px;
            }

            /* Content Styles */
            .content-container {
                background-color: #2a2a2a;
                border-radius: 8px;
                padding: 24px;
                margin-bottom: 24px;
            }

            /* Image Styles */
            .image-preview img {
                max-width: 100%;
                max-height: 512px;
                margin: 0 auto;
                border-radius: 4px;
                display: block;
            }

            /* Control Styles */
            .control-panel {
                background-color: #333;
                padding: 16px;
                border-radius: 8px;
                margin-top: 16px;
            }

            /* Gradio Component Overrides */
            .gr-button {
                background-color: #4a4a4a;
                color: #fff;
                border: none;
                border-radius: 4px;
                padding: 8px 16px;
                cursor: pointer;
                transition: background-color 0.3s;
            }

            .gr-button:hover {
                background-color: #5a5a5a;
            }

            .gr-input, .gr-dropdown {
                background-color: #3a3a3a;
                color: #fff;
                border: 1px solid #4a4a4a;
                border-radius: 4px;
                padding: 8px;
            }

            .gr-form {
                background-color: transparent;
            }

            .gr-panel {
                border: none;
                background-color: transparent;
            }

            /* Override any conflicting styles from Bulma */
            .button.is-normal.is-rounded.is-dark {
                color: #fff !important;
                text-decoration: none !important;
            }
        </style>
        """

        # Page header: external CSS (Bulma + FontAwesome), the styles above,
        # title/description, and publication / sibling-demo links.
        header_html = f"""
        <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/css/bulma.min.css">
        <link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.15.4/css/all.css">
        {app_styles}
        <div class="app-header">
            <h1 class="app-title">Sapiens: Normal Estimation</h1>
            <h2 class="app-subtitle">ECCV 2024 (Oral)</h2>
            <p class="app-description">
                Meta presents Sapiens, foundation models for human tasks pretrained on 300 million human images.
                This demo showcases the finetuned normal estimation model. <br>
                Checkout other normal estimation baselines to compare: <a href="https://huggingface.co/spaces/Stable-X/normal-estimation-arena" style="color: #3273dc;">normal-estimation-arena</a>
            </p>
            <div class="publication-links">
                <a href="https://arxiv.org/abs/2408.12569" class="publication-link">
                    <i class="fas fa-file-pdf"></i>arXiv
                </a>
                <a href="https://github.com/facebookresearch/sapiens" class="publication-link">
                    <i class="fab fa-github"></i>Code
                </a>
                <a href="https://about.meta.com/realitylabs/codecavatars/sapiens/" class="publication-link">
                    <i class="fas fa-globe"></i>Meta
                </a>
                <a href="https://rawalkhirodkar.github.io/sapiens" class="publication-link">
                    <i class="fas fa-chart-bar"></i>Results
                </a>
            </div>
            <div class="publication-links">
                <a href="https://huggingface.co/spaces/facebook/sapiens_pose" class="publication-link">
                    <i class="fas fa-user"></i>Demo-Pose
                </a>
                <a href="https://huggingface.co/spaces/facebook/sapiens_seg" class="publication-link">
                    <i class="fas fa-puzzle-piece"></i>Demo-Seg
                </a>
                <a href="https://huggingface.co/spaces/facebook/sapiens_depth" class="publication-link">
                    <i class="fas fa-cube"></i>Demo-Depth
                </a>
                <a href="https://huggingface.co/spaces/facebook/sapiens_normal" class="publication-link">
                    <i class="fas fa-vector-square"></i>Demo-Normal
                </a>
            </div>
        </div>
        """

        # Thin wrapper so the click handler is a plain function (not a bound method).
        def process_image(image, normal_model_name, seg_model_name):
            result, npy_path = self.image_processor.process_image(image, normal_model_name, seg_model_name)
            return result, npy_path

        # Force the dark Gradio theme via the __theme query parameter on load.
        js_func = """
        function refresh() {
            const url = new URL(window.location);
            if (url.searchParams.get('__theme') !== 'dark') {
                url.searchParams.set('__theme', 'dark');
                window.location.href = url.href;
            }
        }
        """

        with gr.Blocks(js=js_func, theme=gr.themes.Default()) as demo:
            gr.HTML(header_html)
            with gr.Row(elem_classes="content-container"):
                with gr.Column():
                    input_image = gr.Image(label="Input Image", type="pil", format="png", elem_classes="image-preview")
                    with gr.Row(elem_classes="control-panel"):
                        normal_model_name = gr.Dropdown(
                            label="Normal Model Size",
                            choices=list(Config.CHECKPOINTS.keys()),
                            value="1b",
                        )
                        seg_model_name = gr.Dropdown(
                            label="Background Removal Model",
                            choices=list(Config.SEG_CHECKPOINTS.keys()),
                            value="fg-bg-1b (recommended)",
                        )
                    # Example gallery built from every file in assets/images.
                    example_model = gr.Examples(
                        inputs=input_image,
                        examples_per_page=14,
                        examples=[
                            os.path.join(Config.ASSETS_DIR, "images", img)
                            for img in os.listdir(os.path.join(Config.ASSETS_DIR, "images"))
                        ],
                    )
                with gr.Column():
                    result_image = gr.Image(label="Normal Estimation Result", type="pil", elem_classes="image-preview")
                    npy_output = gr.File(label="Output (.npy). Note: Background normal is NaN.")
                    run_button = gr.Button("Run", elem_classes="gr-button")

            run_button.click(
                fn=process_image,
                inputs=[input_image, normal_model_name, seg_model_name],
                outputs=[result_image, npy_output],
            )

        return demo
322
def main():
    """Entry point: enable TF32 on capable GPUs, build the UI, and serve it."""
    # TF32 matmuls require compute capability >= 8 (Ampere or newer).
    has_ampere_gpu = (
        torch.cuda.is_available()
        and torch.cuda.get_device_properties(0).major >= 8
    )
    if has_ampere_gpu:
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True

    GradioInterface().create_interface().launch(share=False)
332
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    main()
assets/checkpoints/sapiens_0.3b_normal_render_people_epoch_66_torchscript.pt2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa2db29f0033e7415843842b3c55a7806397116ca3b7dc6c9b2e7914dacba313
3
+ size 1358768084
assets/checkpoints/sapiens_0.6b_normal_render_people_epoch_200_torchscript.pt2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5367e673a59e6d8cb04f5cb9ae3c675313bc20f844ef51daf53fa8dc020562b1
3
+ size 2685035027
assets/checkpoints/sapiens_1b_goliath_best_goliath_mIoU_7994_epoch_151_torchscript.pt2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33bba30f3de8d9cfd44e4eaa4817b1bfdd98c188edfc87fa7cc031ba0f4edc17
3
+ size 4716314057
assets/checkpoints/sapiens_1b_normal_render_people_epoch_115_torchscript.pt2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:00e29d62c385de04f40bc188dd4571e19cab26a8dbc1424d61a77206b3758fb2
3
+ size 4716203073
assets/checkpoints/sapiens_1b_seg_foreground_epoch_8_torchscript.pt2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:88d2f1590fe040189ad5e9b689099fe3e7a242b4b14bc4d53cff101c20818946
3
+ size 4716180479
assets/checkpoints/sapiens_2b_normal_render_people_epoch_70_torchscript.pt2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:80f94a277f8cbd73a5ffd00c9dbdc6f2d59e66d5ffa00c56ee9706e4cf9292ea
3
+ size 8706490978
assets/images/68204.png ADDED

Git LFS Details

  • SHA256: 9b0268cb801ed164864a4b5f6d131e0ac5cc2fbd149a6467d5d0c97da47122c2
  • Pointer size: 132 Bytes
  • Size of remote file: 4.29 MB
assets/images/68210.png ADDED

Git LFS Details

  • SHA256: dbe5f80498af4ebd1ff09ae4184f37c20ba981e53bd554c3cc78d39ae0ee7fd7
  • Pointer size: 132 Bytes
  • Size of remote file: 3.93 MB
assets/images/68658.png ADDED

Git LFS Details

  • SHA256: 61a68b619bd17235e683324f2826ce0693322e45ab8c86f1c057851ecb333ac7
  • Pointer size: 132 Bytes
  • Size of remote file: 5.1 MB
assets/images/68666.png ADDED

Git LFS Details

  • SHA256: ea3047e6c2ccb485fdb3966aa2325e803cbf49c27c0bff00287b44bc16f18914
  • Pointer size: 132 Bytes
  • Size of remote file: 4.56 MB
assets/images/68691.png ADDED

Git LFS Details

  • SHA256: fae39e4055c1b297af7068cdddfeeba8d685363281b839d8c5afac1980204b57
  • Pointer size: 132 Bytes
  • Size of remote file: 3.74 MB
assets/images/68956.png ADDED

Git LFS Details

  • SHA256: eee1f27082b10999d0fa848121ecb06cda3386b1a864b9aa0f59ae78261f8908
  • Pointer size: 132 Bytes
  • Size of remote file: 4.15 MB
assets/images/pexels-amresh444-17315601.png ADDED

Git LFS Details

  • SHA256: 4e17ee1b229147e4b52e8348a6ef426bc9e9a2f90738e776e15b26b325abb9b3
  • Pointer size: 132 Bytes
  • Size of remote file: 3.5 MB
assets/images/pexels-gabby-k-6311686.png ADDED

Git LFS Details

  • SHA256: 3f10eded3fb05ab04b963f7b9fd2e183d8d4e81b20569b1c6b0653549639421f
  • Pointer size: 132 Bytes
  • Size of remote file: 3.65 MB
assets/images/pexels-julia-m-cameron-4145040.png ADDED

Git LFS Details

  • SHA256: 459cf0280667b028ffbca16aa11188780d7a0205c0defec02916ff3cbaeecb72
  • Pointer size: 132 Bytes
  • Size of remote file: 2.92 MB
assets/images/pexels-marcus-aurelius-6787357.png ADDED

Git LFS Details

  • SHA256: 7d35452f76492125eaf7d5783aa9fd6b0d5990ebe0579fe9dfd58a9d634f4955
  • Pointer size: 132 Bytes
  • Size of remote file: 3.3 MB
assets/images/pexels-mo-saeed-3616599-5409085.png ADDED

Git LFS Details

  • SHA256: 7c1ca7afd6c2a654e94ef59d5fb56fca4f3cde5fb5216f6b218c34a7b8c143dc
  • Pointer size: 132 Bytes
  • Size of remote file: 3.13 MB
assets/images/pexels-riedelmax-27355495.png ADDED

Git LFS Details

  • SHA256: 4141d2f5f718f162ea1f6710c06b28b5cb51fd69598fde35948f8f3491228164
  • Pointer size: 132 Bytes
  • Size of remote file: 3.73 MB
assets/images/pexels-sergeymakashin-5368660.png ADDED

Git LFS Details

  • SHA256: af8f5a8f26dd102d87d94c1be36ec903791fe8e6d951c68ebb9ebcfc6d7397bb
  • Pointer size: 132 Bytes
  • Size of remote file: 4.08 MB
assets/images/pexels-vinicius-wiesehofer-289347-4219918.png ADDED

Git LFS Details

  • SHA256: a6eef5eee15b81fe65ea95627e9a46040b9889466689b3c1ca6ed273e02fe84f
  • Pointer size: 132 Bytes
  • Size of remote file: 3.63 MB
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ numpy
3
+ torch
4
+ torchvision
5
+ matplotlib
6
+ pillow
7
+ spaces
8
+ opencv-python