arieg committed
Commit 7e7990b · 1 Parent(s): b2a00cc

End of training

Files changed (4)
  1. README.md +59 -0
  2. config.json +186 -0
  3. preprocessor_config.json +22 -0
  4. tf_model.h5 +3 -0
README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ license: apache-2.0
+ base_model: google/vit-base-patch16-224-in21k
+ tags:
+ - generated_from_keras_callback
+ model-index:
+ - name: arieg/spec_cls_80
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information Keras had access to. You should
+ probably proofread and complete it, then remove this comment. -->
+
+ # arieg/spec_cls_80
+
+ This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Train Loss: 2.7760
+ - Validation Loss: 2.7406
+ - Train Accuracy: 0.9750
+ - Epoch: 4
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - optimizer: {'name': 'AdamWeightDecay', 'clipnorm': 1.0, 'learning_rate': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 3e-05, 'decay_steps': 7200, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}
+ - training_precision: float32
+
+ ### Training results
+
+ | Train Loss | Validation Loss | Train Accuracy | Epoch |
+ |:----------:|:---------------:|:--------------:|:-----:|
+ | 4.2523 | 4.0977 | 0.5312 | 0 |
+ | 3.8658 | 3.7068 | 0.8562 | 1 |
+ | 3.4605 | 3.3486 | 0.9375 | 2 |
+ | 3.0940 | 3.0254 | 0.9563 | 3 |
+ | 2.7760 | 2.7406 | 0.9750 | 4 |
+
+
+ ### Framework versions
+
+ - Transformers 4.35.0
+ - TensorFlow 2.14.0
+ - Datasets 2.14.6
+ - Tokenizers 0.14.1
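
Since the card names the repo `arieg/spec_cls_80` and this commit ships a TensorFlow checkpoint (`tf_model.h5`) alongside a `ViTImageProcessor` config, a minimal inference sketch might look like the following; the repo id comes from the card, while the input file name is a hypothetical placeholder.

```python
# Minimal inference sketch: repo id from the model card above;
# "example.png" is a hypothetical local input, not part of this repo.
import tensorflow as tf
from PIL import Image
from transformers import TFViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("arieg/spec_cls_80")
model = TFViTForImageClassification.from_pretrained("arieg/spec_cls_80")

image = Image.open("example.png").convert("RGB")
inputs = processor(images=image, return_tensors="tf")  # resize/rescale/normalize per preprocessor_config.json

logits = model(**inputs).logits            # shape (1, 80): one score per class
pred = int(tf.argmax(logits, axis=-1)[0])
print(model.config.id2label[pred])         # index -> label string from config.json
```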
config.json ADDED
@@ -0,0 +1,186 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "56248",
+     "1": "56249",
+     "10": "56470",
+     "11": "56471",
+     "12": "56472",
+     "13": "56474",
+     "14": "56493",
+     "15": "56495",
+     "16": "56496",
+     "17": "56497",
+     "18": "56498",
+     "19": "56499",
+     "2": "56273",
+     "20": "56516",
+     "21": "56517",
+     "22": "56518",
+     "23": "56519",
+     "24": "56520",
+     "25": "56521",
+     "26": "56639",
+     "27": "56640",
+     "28": "56641",
+     "29": "56645",
+     "3": "56274",
+     "30": "56646",
+     "31": "56648",
+     "32": "56649",
+     "33": "56650",
+     "34": "56651",
+     "35": "56686",
+     "36": "56687",
+     "37": "56688",
+     "38": "56689",
+     "39": "56690",
+     "4": "56275",
+     "40": "56691",
+     "41": "56692",
+     "42": "56693",
+     "43": "56694",
+     "44": "56695",
+     "45": "56696",
+     "46": "56795",
+     "47": "56796",
+     "48": "56797",
+     "49": "56798",
+     "5": "56465",
+     "50": "56799",
+     "51": "56800",
+     "52": "56801",
+     "53": "56802",
+     "54": "56803",
+     "55": "56804",
+     "56": "56805",
+     "57": "56888",
+     "58": "57164",
+     "59": "57175",
+     "6": "56466",
+     "60": "57176",
+     "61": "57177",
+     "62": "57178",
+     "63": "57179",
+     "64": "57180",
+     "65": "57344",
+     "66": "57360",
+     "67": "57371",
+     "68": "57417",
+     "69": "57418",
+     "7": "56467",
+     "70": "57440",
+     "71": "57442",
+     "72": "57500",
+     "73": "57569",
+     "74": "57626",
+     "75": "57627",
+     "76": "57628",
+     "77": "57629",
+     "78": "57630",
+     "79": "57639",
+     "8": "56468",
+     "9": "56469"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "56248": "0",
+     "56249": "1",
+     "56273": "2",
+     "56274": "3",
+     "56275": "4",
+     "56465": "5",
+     "56466": "6",
+     "56467": "7",
+     "56468": "8",
+     "56469": "9",
+     "56470": "10",
+     "56471": "11",
+     "56472": "12",
+     "56474": "13",
+     "56493": "14",
+     "56495": "15",
+     "56496": "16",
+     "56497": "17",
+     "56498": "18",
+     "56499": "19",
+     "56516": "20",
+     "56517": "21",
+     "56518": "22",
+     "56519": "23",
+     "56520": "24",
+     "56521": "25",
+     "56639": "26",
+     "56640": "27",
+     "56641": "28",
+     "56645": "29",
+     "56646": "30",
+     "56648": "31",
+     "56649": "32",
+     "56650": "33",
+     "56651": "34",
+     "56686": "35",
+     "56687": "36",
+     "56688": "37",
+     "56689": "38",
+     "56690": "39",
+     "56691": "40",
+     "56692": "41",
+     "56693": "42",
+     "56694": "43",
+     "56695": "44",
+     "56696": "45",
+     "56795": "46",
+     "56796": "47",
+     "56797": "48",
+     "56798": "49",
+     "56799": "50",
+     "56800": "51",
+     "56801": "52",
+     "56802": "53",
+     "56803": "54",
+     "56804": "55",
+     "56805": "56",
+     "56888": "57",
+     "57164": "58",
+     "57175": "59",
+     "57176": "60",
+     "57177": "61",
+     "57178": "62",
+     "57179": "63",
+     "57180": "64",
+     "57344": "65",
+     "57360": "66",
+     "57371": "67",
+     "57417": "68",
+     "57418": "69",
+     "57440": "70",
+     "57442": "71",
+     "57500": "72",
+     "57569": "73",
+     "57626": "74",
+     "57627": "75",
+     "57628": "76",
+     "57629": "77",
+     "57630": "78",
+     "57639": "79"
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "qkv_bias": true,
+   "transformers_version": "4.35.0"
+ }
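
The `id2label`/`label2id` pair above defines the 80-way classification head. As a quick sanity check (a sketch, assuming the repo id `arieg/spec_cls_80` is public), the maps can be inspected after loading; `transformers` converts `id2label` keys to integers on load, while `label2id` stays as written in the JSON.

```python
# Inspect the label maps from config.json above (sketch; repo id assumed public).
from transformers import AutoConfig

config = AutoConfig.from_pretrained("arieg/spec_cls_80")
print(len(config.id2label))      # 80
print(config.id2label[0])        # "56248" (keys coerced to int on load)
print(config.label2id["56248"])  # "0" (inverse lookup, stored as in the JSON)
```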
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
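
Taken together, these settings resize to 224×224 with bilinear resampling (`resample: 2` is PIL's bilinear filter), rescale by `1/255` (0.00392156862745098), and normalize each channel as `(x - 0.5) / 0.5`, mapping pixel values from [0, 255] to [-1, 1]. A sketch of the equivalent manual transform (NumPy/Pillow assumed; the input file name is a placeholder):

```python
# Manual equivalent of the ViTImageProcessor settings above (sketch).
import numpy as np
from PIL import Image

img = Image.open("example.png").convert("RGB")          # hypothetical input
img = img.resize((224, 224), resample=Image.BILINEAR)   # resample: 2 == bilinear

x = np.asarray(img, dtype=np.float32) / 255.0           # rescale_factor = 1/255
x = (x - 0.5) / 0.5                                     # image_mean = image_std = 0.5 -> [-1, 1]
x = np.transpose(x, (2, 0, 1))[None]                    # (1, 3, 224, 224), channels-first
```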
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38ba04a486efdc19d352d9df6292f968109e1cc3fad5ec9aa831a3ddc4a8777d
+ size 343709368
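
The `tf_model.h5` entry is only a Git LFS pointer: the actual ~344 MB HDF5 weights live in LFS storage, addressed by the SHA-256 above. One way to fetch the real file (a sketch using `huggingface_hub`, assuming the repo is public):

```python
# Download the weights behind the LFS pointer above (sketch).
from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="arieg/spec_cls_80", filename="tf_model.h5")
print(path)  # local cache path to the 343,709,368-byte checkpoint
```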