Upload folder using huggingface_hub
- README.md +55 -0
- cc.py +10 -0
- cc.py~ +10 -0
- config.json +34 -0
- model.safetensors +3 -0
- preprocessor_config.json +15 -0
- preprocessor_config.json~ +15 -0
- pytorch_model.bin +3 -0
README.md
ADDED
@@ -0,0 +1,55 @@
+---
+language: en
+license: mit
+tags:
+- fundus
+- diabetic retinopathy
+- classification
+datasets:
+- APTOS
+- EYEPACS
+- IDRID
+- DDR
+library: timm
+model-index:
+- name: vit_base_patch14_dinov2
+  results:
+  - task:
+      type: image-classification
+    dataset:
+      name: EYEPACS
+      type: EYEPACS
+    metrics:
+    - type: kappa
+      value: 0.7338405847549438
+      name: Quadratic Kappa
+  - task:
+      type: image-classification
+    dataset:
+      name: IDRID
+      type: IDRID
+    metrics:
+    - type: kappa
+      value: 0.8239316344261169
+      name: Quadratic Kappa
+  - task:
+      type: image-classification
+    dataset:
+      name: DDR
+      type: DDR
+    metrics:
+    - type: kappa
+      value: 0.7518133521080017
+      name: Quadratic Kappa
+---
+# Fundus DR Grading
+
+[](https://rye-up.com)
+[](https://pytorch.org/docs/stable/index.html)
+[](https://lightning.ai/docs/pytorch/stable/)
+
+## Description
+
+This project aims to evaluate the performance of different models for the classification of diabetic retinopathy (DR) in fundus images. The performance metrics reported in the literature are not always consistent, so our goal is to provide a fair comparison between different models using the same datasets and evaluation protocol.
+
+
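The card itself ships no usage snippet, so here is a minimal inference sketch. It assumes the checkpoint loads into the `timm` architecture recorded in config.json (`vit_base_patch14_dinov2` with `num_classes: 1` and 518x518 input); the file names are the ones in this commit, and the dummy input stands in for a preprocessed fundus photograph.

```python
# Hypothetical usage sketch, not part of the committed files.
import timm
import torch
from safetensors.torch import load_file

# Architecture and head size taken from config.json.
model = timm.create_model("vit_base_patch14_dinov2", num_classes=1)
model.load_state_dict(load_file("model.safetensors"))
model.eval()

# A normalized 518x518 batch stands in for a preprocessed fundus image
# (see preprocessor_config.json for the actual size/mean/std).
dummy = torch.randn(1, 3, 518, 518)
with torch.no_grad():
    grade = model(dummy)  # shape (1, 1): a single continuous DR grade
```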
cc.py
ADDED
@@ -0,0 +1,10 @@
+import safetensors
+from safetensors.torch import save_file
+
+tensors = dict()
+safetensors_path="/Users/matt/Downloads/hfclone/FundusDRGrading-vit_base_patch14_dinov2/model.safetensors"
+with safetensors.safe_open(safetensors_path, framework="pt") as f:
+    for key in f.keys():
+        tensors[key] = f.get_tensor(key)
+
+save_file(tensors, safetensors_path, metadata={'format': 'pt'})
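The re-save with `metadata={'format': 'pt'}` looks like a one-off fixup: some loaders reject safetensors files whose header lacks a `format` key, so the script round-trips every tensor and writes the file back with that metadata attached. A quick way to confirm the rewrite took effect might look like this (a sketch reusing the `safetensors_path` above):

```python
# Sketch: confirm the safetensors header now carries the format metadata.
from safetensors import safe_open

with safe_open(safetensors_path, framework="pt") as f:
    print(f.metadata())  # expected: {'format': 'pt'}
```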
cc.py~
ADDED
@@ -0,0 +1,10 @@
+import safetensors
+from safetensors.torch import save_file
+
+tensors = dict()
+safetensors_path="/Users/matt/Downloads/hfclone/FundusDRGrading-convnext_small/model.safetensors"
+with safetensors.safe_open(safetensors_path, framework="pt") as f:
+    for key in f.keys():
+        tensors[key] = f.get_tensor(key)
+
+save_file(tensors, safetensors_path, metadata={'format': 'pt'})
config.json
ADDED
@@ -0,0 +1,34 @@
+{
+    "architecture": "vit_base_patch14_dinov2",
+    "num_classes": 1,
+    "num_features": 768,
+    "global_pool": "token",
+    "pretrained_cfg": {
+        "tag": "lvd142m",
+        "custom_load": false,
+        "input_size": [
+            3,
+            518,
+            518
+        ],
+        "fixed_input_size": true,
+        "interpolation": "bicubic",
+        "crop_pct": 1.0,
+        "crop_mode": "center",
+        "mean": [
+            0.485,
+            0.456,
+            0.406
+        ],
+        "std": [
+            0.229,
+            0.224,
+            0.225
+        ],
+        "num_classes": 0,
+        "pool_size": null,
+        "first_conv": "patch_embed.proj",
+        "classifier": "head",
+        "license": "apache-2.0"
+    }
+}
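The `pretrained_cfg` block is the same structure `timm` uses to derive an eval transform (518x518, bicubic, `crop_pct` 1.0, ImageNet mean/std). A sketch of resolving it with timm's data helpers, assuming the model is instantiated as in the snippet above:

```python
# Sketch: let timm derive the eval preprocessing from the model's
# pretrained_cfg instead of hand-coding the values.
import timm
from timm.data import resolve_data_config, create_transform

model = timm.create_model("vit_base_patch14_dinov2", num_classes=1)
config = resolve_data_config({}, model=model)
transform = create_transform(**config)  # PIL image -> normalized tensor
```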
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f51709947c51cb5659315775fe55433ecb9d3f8651265cd87376b1a7cdc33d8
+size 346113884
preprocessor_config.json
ADDED
@@ -0,0 +1,15 @@
+{
+    "do_normalize": true,
+    "do_resize": true,
+    "image_mean": [
+        0.485,
+        0.456,
+        0.406
+    ],
+    "image_std": [
+        0.229,
+        0.224,
+        0.225
+    ],
+    "size": 518
+}
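For loaders that read this file directly rather than going through `timm`, the keys map one-to-one onto a torchvision pipeline. A sketch under that assumption:

```python
# Sketch: build a torchvision transform from preprocessor_config.json.
import json
from torchvision import transforms

with open("preprocessor_config.json") as f:
    cfg = json.load(f)

preprocess = transforms.Compose([
    transforms.Resize((cfg["size"], cfg["size"])),  # 518x518 here
    transforms.ToTensor(),
    transforms.Normalize(cfg["image_mean"], cfg["image_std"]),
])
```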
preprocessor_config.json~
ADDED
@@ -0,0 +1,15 @@
+{
+    "do_normalize": true,
+    "do_resize": true,
+    "image_mean": [
+        0.485,
+        0.456,
+        0.406
+    ],
+    "image_std": [
+        0.229,
+        0.224,
+        0.225
+    ],
+    "size": 224
+}
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5267c52d28fc6d392009db187e810cfa7c86228b62499faf9be5d49b388fe168
+size 346161738