Safetensors · English · qwen2_vl · remote-sensing

AdaptLLM committed · Commit 23d10b4 · verified · 1 Parent(s): 494c349

Upload folder using huggingface_hub

added_tokens.json ADDED
@@ -0,0 +1,16 @@
+{
+  "<|box_end|>": 151649,
+  "<|box_start|>": 151648,
+  "<|endoftext|>": 151643,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644,
+  "<|image_pad|>": 151655,
+  "<|object_ref_end|>": 151647,
+  "<|object_ref_start|>": 151646,
+  "<|quad_end|>": 151651,
+  "<|quad_start|>": 151650,
+  "<|video_pad|>": 151656,
+  "<|vision_end|>": 151653,
+  "<|vision_pad|>": 151654,
+  "<|vision_start|>": 151652
+}
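These 14 entries append the chat, grounding, and vision control tokens after the 151,643 base BPE ids. A quick sanity check that they resolve to the listed IDs; "AdaptLLM/remote-sensing-Qwen2-VL" below is a placeholder for this repo's actual id:

```python
# Sketch: the control tokens should map to the IDs in added_tokens.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("AdaptLLM/remote-sensing-Qwen2-VL")  # placeholder id
assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
assert tok.convert_tokens_to_ids("<|image_pad|>") == 151655
```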
all_results.json ADDED
@@ -0,0 +1,8 @@
+{
+  "epoch": 1.0,
+  "total_flos": 1.4819175628485427e+17,
+  "train_loss": 0.8990087810315583,
+  "train_runtime": 350.8438,
+  "train_samples_per_second": 103.861,
+  "train_steps_per_second": 0.812
+}
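The throughput numbers are internally consistent and pin down the effective optimizer batch size:

```python
# Derive the effective batch from the reported rates in all_results.json.
samples_per_sec = 103.861
steps_per_sec = 0.812
runtime_s = 350.8438

print(samples_per_sec / steps_per_sec)  # ~127.9 -> ~128 samples per optimizer step
print(samples_per_sec * runtime_s)      # ~36,440 samples seen in the single epoch
```

With the per-device "train_batch_size": 8 recorded in trainer_state.json, an effective batch of ~128 implies a combined data-parallel/gradient-accumulation factor of about 16 (an inference from the numbers, not stated in these files).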
chat_template.json ADDED
@@ -0,0 +1,3 @@
+{
+  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
+}
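The template wraps each turn in <|im_start|>/<|im_end|>, injects a default system prompt when the first message is not a system message, and expands each image or video item into <|vision_start|><|image_pad|><|vision_end|> (or the video equivalent). A minimal rendering sketch, again with a placeholder repo id:

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("AdaptLLM/remote-sensing-Qwen2-VL")  # placeholder id
messages = [{
    "role": "user",
    "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe the land cover in this scene."},
    ],
}]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
# -> <|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n
#    <|vision_start|><|image_pad|><|vision_end|>Describe the land cover in this scene.<|im_end|>\n
#    <|im_start|>assistant\n
```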
config.json ADDED
@@ -0,0 +1,46 @@
+{
+  "architectures": [
+    "Qwen2VLForConditionalGeneration"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "eos_token_id": 151645,
+  "hidden_act": "silu",
+  "hidden_size": 1536,
+  "image_token_id": 151655,
+  "initializer_range": 0.02,
+  "intermediate_size": 8960,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 28,
+  "model_type": "qwen2_vl",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 28,
+  "num_key_value_heads": 2,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": {
+    "mrope_section": [
+      16,
+      24,
+      24
+    ],
+    "type": "mrope"
+  },
+  "rope_theta": 1000000.0,
+  "sliding_window": 32768,
+  "tie_word_embeddings": true,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.45.0.dev0",
+  "use_cache": false,
+  "use_sliding_window": false,
+  "video_token_id": 151656,
+  "vision_config": {
+    "hidden_size": 1536,
+    "in_chans": 3,
+    "model_type": "qwen2_vl",
+    "spatial_patch_size": 14
+  },
+  "vision_end_token_id": 151653,
+  "vision_start_token_id": 151652,
+  "vision_token_id": 151654,
+  "vocab_size": 151936
+}
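The config describes the 2B-class Qwen2-VL variant: 28 layers with hidden size 1536, 12 attention heads with grouped-query attention (2 KV heads), multimodal RoPE ("mrope") split 16/24/24 across temporal/height/width sections, tied embeddings, and bfloat16 weights. A loading sketch under the same placeholder repo id:

```python
import torch
from transformers import Qwen2VLForConditionalGeneration

model = Qwen2VLForConditionalGeneration.from_pretrained(
    "AdaptLLM/remote-sensing-Qwen2-VL",  # placeholder id
    torch_dtype=torch.bfloat16,          # matches "torch_dtype": "bfloat16"
    device_map="auto",
)
```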
generation_config.json ADDED
@@ -0,0 +1,13 @@
+{
+  "bos_token_id": 151643,
+  "do_sample": true,
+  "eos_token_id": [
+    151645,
+    151643
+  ],
+  "pad_token_id": 151643,
+  "temperature": 0.01,
+  "top_k": 1,
+  "top_p": 0.001,
+  "transformers_version": "4.45.0.dev0"
+}
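Note that with "top_k": 1 and a near-zero temperature, decoding is effectively greedy even though "do_sample" is true. Continuing the sketches above (processor, model, and prompt as defined there; the image path is hypothetical):

```python
from PIL import Image

image = Image.open("scene.png")  # hypothetical input image
inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=256)  # decoding defaults come from generation_config.json
new_tokens = out[:, inputs["input_ids"].shape[1]:]
print(processor.batch_decode(new_tokens, skip_special_tokens=True)[0])
```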
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7901956be7ababfff6d313d6d1df43d13863ff62df6af2f2d07401782307e143
+size 4884798456
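This is a Git LFS pointer: the ~4.9 GB weight file is stored in LFS, and only its SHA-256 and size are versioned here. A downloaded copy can be checked against the recorded digest:

```python
# Verify a downloaded model.safetensors against the oid in the pointer above.
import hashlib

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == "7901956be7ababfff6d313d6d1df43d13863ff62df6af2f2d07401782307e143"
```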
preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
+{
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "Qwen2VLImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "max_pixels": 12845056,
+  "merge_size": 2,
+  "min_pixels": 3136,
+  "patch_size": 14,
+  "processor_class": "Qwen2VLProcessor",
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "max_pixels": 12845056,
+    "min_pixels": 3136
+  },
+  "temporal_patch_size": 2
+}
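This configures Qwen2-VL's dynamic resolution: images are rescaled by 1/255, normalized with the CLIP mean/std, resized so their area lies between min_pixels (3136 = 56×56) and max_pixels (12845056 = 3584×3584), and cut into 14 px patches merged 2×2, so each 28×28 block becomes one visual token. The pixel budget can be tightened at load time to bound the token count (the values below are illustrative, not from this repo):

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained(
    "AdaptLLM/remote-sensing-Qwen2-VL",  # placeholder id
    min_pixels=256 * 28 * 28,            # floor on resized image area
    max_pixels=1280 * 28 * 28,           # cap -> at most ~1280 visual tokens per image
)
```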
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "eos_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,129 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151646": {
+      "content": "<|object_ref_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151647": {
+      "content": "<|object_ref_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151648": {
+      "content": "<|box_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151649": {
+      "content": "<|box_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151650": {
+      "content": "<|quad_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151651": {
+      "content": "<|quad_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151652": {
+      "content": "<|vision_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151653": {
+      "content": "<|vision_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151654": {
+      "content": "<|vision_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151655": {
+      "content": "<|image_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151656": {
+      "content": "<|video_pad|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": ["<|im_start|>", "<|im_end|>", "<|object_ref_start|>", "<|object_ref_end|>", "<|box_start|>", "<|box_end|>", "<|quad_start|>", "<|quad_end|>", "<|vision_start|>", "<|vision_end|>", "<|vision_pad|>", "<|image_pad|>", "<|video_pad|>"],
+  "bos_token": null,
+  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "padding_side": "left",
+  "errors": "replace",
+  "model_max_length": 32768,
+  "pad_token": "<|endoftext|>",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}
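The "padding_side": "left" setting matters for batched generation: pads go before the prompt so every sequence ends on a real token. A small sketch with the same placeholder repo id:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("AdaptLLM/remote-sensing-Qwen2-VL")  # placeholder id
batch = tok(["short prompt", "a considerably longer prompt"],
            padding=True, return_tensors="pt")
print(batch["input_ids"][0])  # pad id 151643 (<|endoftext|>) appears on the left
```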
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+  "epoch": 1.0,
+  "total_flos": 1.4819175628485427e+17,
+  "train_loss": 0.8990087810315583,
+  "train_runtime": 350.8438,
+  "train_samples_per_second": 103.861,
+  "train_steps_per_second": 0.812
+}
trainer_log.jsonl ADDED
@@ -0,0 +1,29 @@
+{"current_steps": 10, "total_steps": 285, "loss": 1.8558, "learning_rate": 3.448275862068966e-06, "epoch": 0.03508771929824561, "percentage": 3.51, "elapsed_time": "0:00:13", "remaining_time": "0:06:18"}
+{"current_steps": 20, "total_steps": 285, "loss": 1.4458, "learning_rate": 6.896551724137932e-06, "epoch": 0.07017543859649122, "percentage": 7.02, "elapsed_time": "0:00:25", "remaining_time": "0:05:40"}
+{"current_steps": 30, "total_steps": 285, "loss": 1.2409, "learning_rate": 9.999623509195724e-06, "epoch": 0.10526315789473684, "percentage": 10.53, "elapsed_time": "0:00:37", "remaining_time": "0:05:18"}
+{"current_steps": 40, "total_steps": 285, "loss": 1.1153, "learning_rate": 9.9545131771389e-06, "epoch": 0.14035087719298245, "percentage": 14.04, "elapsed_time": "0:00:49", "remaining_time": "0:05:01"}
+{"current_steps": 50, "total_steps": 285, "loss": 1.0284, "learning_rate": 9.834882355224261e-06, "epoch": 0.17543859649122806, "percentage": 17.54, "elapsed_time": "0:01:00", "remaining_time": "0:04:44"}
+{"current_steps": 60, "total_steps": 285, "loss": 0.9903, "learning_rate": 9.64253040236608e-06, "epoch": 0.21052631578947367, "percentage": 21.05, "elapsed_time": "0:01:12", "remaining_time": "0:04:31"}
+{"current_steps": 70, "total_steps": 285, "loss": 0.9464, "learning_rate": 9.380350470977033e-06, "epoch": 0.24561403508771928, "percentage": 24.56, "elapsed_time": "0:01:24", "remaining_time": "0:04:20"}
+{"current_steps": 80, "total_steps": 285, "loss": 0.9232, "learning_rate": 9.052285991262975e-06, "epoch": 0.2807017543859649, "percentage": 28.07, "elapsed_time": "0:01:37", "remaining_time": "0:04:08"}
+{"current_steps": 90, "total_steps": 285, "loss": 0.8927, "learning_rate": 8.663271358362064e-06, "epoch": 0.3157894736842105, "percentage": 31.58, "elapsed_time": "0:01:49", "remaining_time": "0:03:56"}
+{"current_steps": 100, "total_steps": 285, "loss": 0.8748, "learning_rate": 8.219157714448957e-06, "epoch": 0.3508771929824561, "percentage": 35.09, "elapsed_time": "0:02:00", "remaining_time": "0:03:42"}
+{"current_steps": 110, "total_steps": 285, "loss": 0.8712, "learning_rate": 7.726624942110233e-06, "epoch": 0.38596491228070173, "percentage": 38.6, "elapsed_time": "0:02:11", "remaining_time": "0:03:29"}
+{"current_steps": 120, "total_steps": 285, "loss": 0.8413, "learning_rate": 7.193081192692639e-06, "epoch": 0.42105263157894735, "percentage": 42.11, "elapsed_time": "0:02:24", "remaining_time": "0:03:18"}
+{"current_steps": 130, "total_steps": 285, "loss": 0.8245, "learning_rate": 6.626551460811316e-06, "epoch": 0.45614035087719296, "percentage": 45.61, "elapsed_time": "0:02:35", "remaining_time": "0:03:05"}
+{"current_steps": 140, "total_steps": 285, "loss": 0.7995, "learning_rate": 6.035556880961093e-06, "epoch": 0.49122807017543857, "percentage": 49.12, "elapsed_time": "0:02:48", "remaining_time": "0:02:54"}
+{"current_steps": 150, "total_steps": 285, "loss": 0.7919, "learning_rate": 5.4289865617222005e-06, "epoch": 0.5263157894736842, "percentage": 52.63, "elapsed_time": "0:02:59", "remaining_time": "0:02:41"}
+{"current_steps": 160, "total_steps": 285, "loss": 0.794, "learning_rate": 4.815963885293206e-06, "epoch": 0.5614035087719298, "percentage": 56.14, "elapsed_time": "0:03:10", "remaining_time": "0:02:28"}
+{"current_steps": 170, "total_steps": 285, "loss": 0.7713, "learning_rate": 4.205709283330694e-06, "epoch": 0.5964912280701754, "percentage": 59.65, "elapsed_time": "0:03:22", "remaining_time": "0:02:16"}
+{"current_steps": 180, "total_steps": 285, "loss": 0.7775, "learning_rate": 3.6074015530747354e-06, "epoch": 0.631578947368421, "percentage": 63.16, "elapsed_time": "0:03:34", "remaining_time": "0:02:05"}
+{"current_steps": 190, "total_steps": 285, "loss": 0.7515, "learning_rate": 3.0300397996947604e-06, "epoch": 0.6666666666666666, "percentage": 66.67, "elapsed_time": "0:03:46", "remaining_time": "0:01:53"}
+{"current_steps": 200, "total_steps": 285, "loss": 0.765, "learning_rate": 2.482308081371413e-06, "epoch": 0.7017543859649122, "percentage": 70.18, "elapsed_time": "0:03:57", "remaining_time": "0:01:41"}
+{"current_steps": 210, "total_steps": 285, "loss": 0.7528, "learning_rate": 1.972444792978373e-06, "epoch": 0.7368421052631579, "percentage": 73.68, "elapsed_time": "0:04:09", "remaining_time": "0:01:28"}
+{"current_steps": 220, "total_steps": 285, "loss": 0.7396, "learning_rate": 1.508118752955136e-06, "epoch": 0.7719298245614035, "percentage": 77.19, "elapsed_time": "0:04:20", "remaining_time": "0:01:16"}
+{"current_steps": 230, "total_steps": 285, "loss": 0.7408, "learning_rate": 1.0963138571395277e-06, "epoch": 0.8070175438596491, "percentage": 80.7, "elapsed_time": "0:04:32", "remaining_time": "0:01:05"}
+{"current_steps": 240, "total_steps": 285, "loss": 0.7486, "learning_rate": 7.43224034473674e-07, "epoch": 0.8421052631578947, "percentage": 84.21, "elapsed_time": "0:04:44", "remaining_time": "0:00:53"}
+{"current_steps": 250, "total_steps": 285, "loss": 0.7359, "learning_rate": 4.5416008454738813e-07, "epoch": 0.8771929824561403, "percentage": 87.72, "elapsed_time": "0:04:56", "remaining_time": "0:00:41"}
+{"current_steps": 260, "total_steps": 285, "loss": 0.7375, "learning_rate": 2.3346979822903071e-07, "epoch": 0.9122807017543859, "percentage": 91.23, "elapsed_time": "0:05:08", "remaining_time": "0:00:29"}
+{"current_steps": 270, "total_steps": 285, "loss": 0.7245, "learning_rate": 8.447256284391858e-08, "epoch": 0.9473684210526315, "percentage": 94.74, "elapsed_time": "0:05:19", "remaining_time": "0:00:17"}
+{"current_steps": 280, "total_steps": 285, "loss": 0.7378, "learning_rate": 9.409435499254105e-09, "epoch": 0.9824561403508771, "percentage": 98.25, "elapsed_time": "0:05:32", "remaining_time": "0:00:05"}
+{"current_steps": 285, "total_steps": 285, "epoch": 1.0, "percentage": 100.0, "elapsed_time": "0:05:50", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,238 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 285,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.03508771929824561,
+      "grad_norm": 6.543553342182306,
+      "learning_rate": 3.448275862068966e-06,
+      "loss": 1.8558,
+      "step": 10
+    },
+    {
+      "epoch": 0.07017543859649122,
+      "grad_norm": 3.5154288610740023,
+      "learning_rate": 6.896551724137932e-06,
+      "loss": 1.4458,
+      "step": 20
+    },
+    {
+      "epoch": 0.10526315789473684,
+      "grad_norm": 2.778543537236469,
+      "learning_rate": 9.999623509195724e-06,
+      "loss": 1.2409,
+      "step": 30
+    },
+    {
+      "epoch": 0.14035087719298245,
+      "grad_norm": 2.5059789149316045,
+      "learning_rate": 9.9545131771389e-06,
+      "loss": 1.1153,
+      "step": 40
+    },
+    {
+      "epoch": 0.17543859649122806,
+      "grad_norm": 2.33426924896289,
+      "learning_rate": 9.834882355224261e-06,
+      "loss": 1.0284,
+      "step": 50
+    },
+    {
+      "epoch": 0.21052631578947367,
+      "grad_norm": 2.238033736817208,
+      "learning_rate": 9.64253040236608e-06,
+      "loss": 0.9903,
+      "step": 60
+    },
+    {
+      "epoch": 0.24561403508771928,
+      "grad_norm": 2.172103858227053,
+      "learning_rate": 9.380350470977033e-06,
+      "loss": 0.9464,
+      "step": 70
+    },
+    {
+      "epoch": 0.2807017543859649,
+      "grad_norm": 2.0775067726943957,
+      "learning_rate": 9.052285991262975e-06,
+      "loss": 0.9232,
+      "step": 80
+    },
+    {
+      "epoch": 0.3157894736842105,
+      "grad_norm": 2.036467828021783,
+      "learning_rate": 8.663271358362064e-06,
+      "loss": 0.8927,
+      "step": 90
+    },
+    {
+      "epoch": 0.3508771929824561,
+      "grad_norm": 2.092139892865783,
+      "learning_rate": 8.219157714448957e-06,
+      "loss": 0.8748,
+      "step": 100
+    },
+    {
+      "epoch": 0.38596491228070173,
+      "grad_norm": 2.354936030454241,
+      "learning_rate": 7.726624942110233e-06,
+      "loss": 0.8712,
+      "step": 110
+    },
+    {
+      "epoch": 0.42105263157894735,
+      "grad_norm": 2.4480552576537313,
+      "learning_rate": 7.193081192692639e-06,
+      "loss": 0.8413,
+      "step": 120
+    },
+    {
+      "epoch": 0.45614035087719296,
+      "grad_norm": 2.225961040994733,
+      "learning_rate": 6.626551460811316e-06,
+      "loss": 0.8245,
+      "step": 130
+    },
+    {
+      "epoch": 0.49122807017543857,
+      "grad_norm": 2.2280118933835227,
+      "learning_rate": 6.035556880961093e-06,
+      "loss": 0.7995,
+      "step": 140
+    },
+    {
+      "epoch": 0.5263157894736842,
+      "grad_norm": 2.1092585894132694,
+      "learning_rate": 5.4289865617222005e-06,
+      "loss": 0.7919,
+      "step": 150
+    },
+    {
+      "epoch": 0.5614035087719298,
+      "grad_norm": 2.2317869413724947,
+      "learning_rate": 4.815963885293206e-06,
+      "loss": 0.794,
+      "step": 160
+    },
+    {
+      "epoch": 0.5964912280701754,
+      "grad_norm": 2.1555810331504976,
+      "learning_rate": 4.205709283330694e-06,
+      "loss": 0.7713,
+      "step": 170
+    },
+    {
+      "epoch": 0.631578947368421,
+      "grad_norm": 2.0442009159494785,
+      "learning_rate": 3.6074015530747354e-06,
+      "loss": 0.7775,
+      "step": 180
+    },
+    {
+      "epoch": 0.6666666666666666,
+      "grad_norm": 2.113365321780909,
+      "learning_rate": 3.0300397996947604e-06,
+      "loss": 0.7515,
+      "step": 190
+    },
+    {
+      "epoch": 0.7017543859649122,
+      "grad_norm": 2.1674953587420838,
+      "learning_rate": 2.482308081371413e-06,
+      "loss": 0.765,
+      "step": 200
+    },
+    {
+      "epoch": 0.7368421052631579,
+      "grad_norm": 2.2304080548395917,
+      "learning_rate": 1.972444792978373e-06,
+      "loss": 0.7528,
+      "step": 210
+    },
+    {
+      "epoch": 0.7719298245614035,
+      "grad_norm": 2.40752310761597,
+      "learning_rate": 1.508118752955136e-06,
+      "loss": 0.7396,
+      "step": 220
+    },
+    {
+      "epoch": 0.8070175438596491,
+      "grad_norm": 2.117568701663915,
+      "learning_rate": 1.0963138571395277e-06,
+      "loss": 0.7408,
+      "step": 230
+    },
+    {
+      "epoch": 0.8421052631578947,
+      "grad_norm": 2.919503306825211,
+      "learning_rate": 7.43224034473674e-07,
+      "loss": 0.7486,
+      "step": 240
+    },
+    {
+      "epoch": 0.8771929824561403,
+      "grad_norm": 2.125280125804679,
+      "learning_rate": 4.5416008454738813e-07,
+      "loss": 0.7359,
+      "step": 250
+    },
+    {
+      "epoch": 0.9122807017543859,
+      "grad_norm": 2.066124288511879,
+      "learning_rate": 2.3346979822903071e-07,
+      "loss": 0.7375,
+      "step": 260
+    },
+    {
+      "epoch": 0.9473684210526315,
+      "grad_norm": 1.9772889405004763,
+      "learning_rate": 8.447256284391858e-08,
+      "loss": 0.7245,
+      "step": 270
+    },
+    {
+      "epoch": 0.9824561403508771,
+      "grad_norm": 1.9570228116416033,
+      "learning_rate": 9.409435499254105e-09,
+      "loss": 0.7378,
+      "step": 280
+    },
+    {
+      "epoch": 1.0,
+      "step": 285,
+      "total_flos": 1.4819175628485427e+17,
+      "train_loss": 0.8990087810315583,
+      "train_runtime": 350.8438,
+      "train_samples_per_second": 103.861,
+      "train_steps_per_second": 0.812
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 285,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 1,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1.4819175628485427e+17,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
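The logged learning rates are consistent with roughly 29 warmup steps to a 1e-5 peak followed by cosine decay to zero. That schedule is an inference from the numbers, not stated anywhere in these files, but it reproduces them closely:

```python
import math

def lr(step, peak=1e-5, warmup=29, total=285):
    # Inferred schedule: linear warmup, then cosine decay (an assumption).
    if step < warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return 0.5 * peak * (1 + math.cos(math.pi * progress))

print(lr(10))  # ~3.448e-06, matches the step-10 entry above
print(lr(40))  # ~9.955e-06, matches the step-40 entry
```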
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:917afe976f6d4c3b709d5fd40b3a87a682f30c20237b75ae8eedc3ae7fb4f468
+size 6776
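training_args.bin is a pickled TrainingArguments object rather than readable JSON. It can be inspected by unpickling, which executes code from the checkpoint, so only do this for a source you trust (the weights_only flag below applies to newer PyTorch versions):

```python
import torch

# Unpickles arbitrary objects; trusted sources only.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size)
```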
vocab.json ADDED
The diff for this file is too large to render. See raw diff