bilbo991 committed
Commit 1a59cc2 · 1 Parent(s): 8f7def5

End of training
all_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+     "epoch": 3.0,
+     "eval_loss": 2.8674657344818115,
+     "eval_runtime": 268.6011,
+     "eval_samples_per_second": 74.46,
+     "eval_steps_per_second": 2.327,
+     "train_loss": 2.2601124869791667,
+     "train_runtime": 7762.5186,
+     "train_samples_per_second": 38.647,
+     "train_steps_per_second": 1.208
+ }
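
Taken together, the throughput and runtime figures above imply roughly 20k evaluation samples and about 100k training samples per epoch, which lines up with the repo name clip-br-100k. A small sketch of that arithmetic, assuming all_results.json is read from the repo root:

import json

# Rough sanity check on the reported metrics (file path assumed).
with open("all_results.json") as f:
    results = json.load(f)

eval_samples = results["eval_runtime"] * results["eval_samples_per_second"]
train_samples_total = results["train_runtime"] * results["train_samples_per_second"]
train_samples_per_epoch = train_samples_total / results["epoch"]

print(f"~{eval_samples:,.0f} eval samples")                        # ≈ 20,000
print(f"~{train_samples_per_epoch:,.0f} train samples per epoch")  # ≈ 100,000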
eval_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "epoch": 3.0,
+     "eval_loss": 2.8674657344818115,
+     "eval_runtime": 268.6011,
+     "eval_samples_per_second": 74.46,
+     "eval_steps_per_second": 2.327
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "CLIPImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "processor_class": "VisionTextDualEncoderProcessor",
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 224
+   }
+ }
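
This is the standard CLIP image pipeline: resize so the shortest edge is 224, center-crop to 224×224, rescale by 1/255, and normalize with the CLIP mean/std. A minimal usage sketch, assuming the image processor is loaded from this repo (the repo id and image path below are placeholders):

from PIL import Image
from transformers import CLIPImageProcessor

# Placeholder repo id and image path; adjust to the actual checkpoint location.
image_processor = CLIPImageProcessor.from_pretrained("bilbo991/clip-br-100k")

image = Image.open("example.jpg")
pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 224, 224])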
runs/Aug15_17-11-13_cvrl-flynn-ws2/events.out.tfevents.1692142027.cvrl-flynn-ws2.14211.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d3477ea8ba06a037dc77f94a1a9ea669038eab40191105d8f06880a3bbcf4d2
+ size 359
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "full_tokenizer_file": null,
+   "mask_token": "[MASK]",
+   "max_len": 512,
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "processor_class": "VisionTextDualEncoderProcessor",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "unk_token": "[UNK]"
+ }
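
The text side is a cased DistilBERT tokenizer (do_lower_case is false, model_max_length 512) wrapped together with the image processor above in a VisionTextDualEncoderProcessor. A short sketch of loading it via AutoTokenizer, again with a placeholder repo id:

from transformers import AutoTokenizer

# Placeholder repo id; tokenizer_class above resolves this to DistilBertTokenizer.
tokenizer = AutoTokenizer.from_pretrained("bilbo991/clip-br-100k")

enc = tokenizer("a photo of a dog", truncation=True, max_length=512)
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
# ['[CLS]', 'a', 'photo', 'of', 'a', 'dog', '[SEP]']  (case is preserved)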
train_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "epoch": 3.0,
+     "train_loss": 2.2601124869791667,
+     "train_runtime": 7762.5186,
+     "train_samples_per_second": 38.647,
+     "train_steps_per_second": 1.208
+ }
trainer_state.json ADDED
@@ -0,0 +1,157 @@
+ {
+   "best_metric": 2.8674657344818115,
+   "best_model_checkpoint": "clip-br-100k/checkpoint-6250",
+   "epoch": 3.0,
+   "global_step": 9375,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.16,
+       "learning_rate": 4.7333333333333336e-05,
+       "loss": 3.364,
+       "step": 500
+     },
+     {
+       "epoch": 0.32,
+       "learning_rate": 4.466666666666667e-05,
+       "loss": 3.1853,
+       "step": 1000
+     },
+     {
+       "epoch": 0.48,
+       "learning_rate": 4.2e-05,
+       "loss": 3.0724,
+       "step": 1500
+     },
+     {
+       "epoch": 0.64,
+       "learning_rate": 3.933333333333333e-05,
+       "loss": 2.9594,
+       "step": 2000
+     },
+     {
+       "epoch": 0.8,
+       "learning_rate": 3.6666666666666666e-05,
+       "loss": 2.8763,
+       "step": 2500
+     },
+     {
+       "epoch": 0.96,
+       "learning_rate": 3.4000000000000007e-05,
+       "loss": 2.7977,
+       "step": 3000
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 3.0236034393310547,
+       "eval_runtime": 269.4873,
+       "eval_samples_per_second": 74.215,
+       "eval_steps_per_second": 2.319,
+       "step": 3125
+     },
+     {
+       "epoch": 1.12,
+       "learning_rate": 3.1333333333333334e-05,
+       "loss": 2.4752,
+       "step": 3500
+     },
+     {
+       "epoch": 1.28,
+       "learning_rate": 2.8666666666666668e-05,
+       "loss": 2.3537,
+       "step": 4000
+     },
+     {
+       "epoch": 1.44,
+       "learning_rate": 2.6000000000000002e-05,
+       "loss": 2.3224,
+       "step": 4500
+     },
+     {
+       "epoch": 1.6,
+       "learning_rate": 2.3333333333333336e-05,
+       "loss": 2.2794,
+       "step": 5000
+     },
+     {
+       "epoch": 1.76,
+       "learning_rate": 2.0666666666666666e-05,
+       "loss": 2.2444,
+       "step": 5500
+     },
+     {
+       "epoch": 1.92,
+       "learning_rate": 1.8e-05,
+       "loss": 2.2202,
+       "step": 6000
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 2.8674657344818115,
+       "eval_runtime": 269.0568,
+       "eval_samples_per_second": 74.334,
+       "eval_steps_per_second": 2.323,
+       "step": 6250
+     },
+     {
+       "epoch": 2.08,
+       "learning_rate": 1.5333333333333334e-05,
+       "loss": 1.8795,
+       "step": 6500
+     },
+     {
+       "epoch": 2.24,
+       "learning_rate": 1.2666666666666668e-05,
+       "loss": 1.501,
+       "step": 7000
+     },
+     {
+       "epoch": 2.4,
+       "learning_rate": 1e-05,
+       "loss": 1.4826,
+       "step": 7500
+     },
+     {
+       "epoch": 2.56,
+       "learning_rate": 7.333333333333334e-06,
+       "loss": 1.4807,
+       "step": 8000
+     },
+     {
+       "epoch": 2.72,
+       "learning_rate": 4.666666666666667e-06,
+       "loss": 1.446,
+       "step": 8500
+     },
+     {
+       "epoch": 2.88,
+       "learning_rate": 2.0000000000000003e-06,
+       "loss": 1.4086,
+       "step": 9000
+     },
+     {
+       "epoch": 3.0,
+       "eval_loss": 3.140260934829712,
+       "eval_runtime": 269.6762,
+       "eval_samples_per_second": 74.163,
+       "eval_steps_per_second": 2.318,
+       "step": 9375
+     },
+     {
+       "epoch": 3.0,
+       "step": 9375,
+       "total_flos": 3.01208373504e+16,
+       "train_loss": 2.2601124869791667,
+       "train_runtime": 7762.5186,
+       "train_samples_per_second": 38.647,
+       "train_steps_per_second": 1.208
+     }
+   ],
+   "max_steps": 9375,
+   "num_train_epochs": 3,
+   "total_flos": 3.01208373504e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
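
Note that best_model_checkpoint points at checkpoint-6250: eval_loss bottoms out at 2.867 after epoch 2 and climbs back to 3.140 at epoch 3 while the training loss keeps falling, the usual overfitting signature. A small sketch for pulling the eval curve back out of this file (path assumed relative to the repo root):

import json

# Read the trainer state and list every logged evaluation.
with open("trainer_state.json") as f:
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(f"step {e['step']:>5}  epoch {e['epoch']:.1f}  eval_loss {e['eval_loss']:.4f}")

best = min(evals, key=lambda e: e["eval_loss"])
print("best eval_loss at step", best["step"])  # 6250, matching best_model_checkpoint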
vocab.txt ADDED
The diff for this file is too large to render. See raw diff