rohitnagareddy committed
Commit 2add5ab · verified · 1 parent: d909236

Upload fine-tuned QLoRA model

added_tokens.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "<|execute_end|>": 73444,
+   "<|execute_start|>": 73443,
+   "<|fim_middle|>": 73446,
+   "<|fim_prefix|>": 73445,
+   "<|fim_suffix|>": 73447,
+   "<|im_end|>": 73440,
+   "<|im_start|>": 73441,
+   "<|tool_call|>": 73442
+ }
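These ChatML and fill-in-the-middle control tokens extend the base vocabulary (IDs 73440–73447; note `"vocab_size": 73448` in config.json below). A minimal sketch of checking the token-to-ID mapping; the repo id is a placeholder, since the commit page does not name the repository:

```python
from transformers import AutoTokenizer

# Placeholder repo id -- substitute the actual repository path.
tok = AutoTokenizer.from_pretrained("rohitnagareddy/MiniCPM4-0.5B-qlora")

# The added tokens should round-trip to the IDs declared in added_tokens.json.
assert tok.convert_tokens_to_ids("<|im_end|>") == 73440
assert tok.convert_ids_to_tokens(73441) == "<|im_start|>"
```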
chat_template.jinja ADDED
@@ -0,0 +1,4 @@
+ {% for message in messages %}{{'<|im_start|>' + message['role'] + '
+ ' + message['content'] + '<|im_end|>' + '
+ '}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant
+ ' }}{% endif %}
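The template renders messages in ChatML form, with an optional trailing assistant header. A quick sketch of what `apply_chat_template` produces with it (same placeholder repo id as above):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("rohitnagareddy/MiniCPM4-0.5B-qlora")  # placeholder id
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Hi there"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# <|im_start|>user
# Hi there<|im_end|>
# <|im_start|>assistant
```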
config.json ADDED
@@ -0,0 +1,112 @@
+ {
+   "architectures": [
+     "MiniCPMForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "openbmb/MiniCPM4-0.5B--configuration_minicpm.MiniCPMConfig",
+     "AutoModel": "openbmb/MiniCPM4-0.5B--modeling_minicpm.MiniCPMModel",
+     "AutoModelForCausalLM": "openbmb/MiniCPM4-0.5B--modeling_minicpm.MiniCPMForCausalLM",
+     "AutoModelForSeq2SeqLM": "openbmb/MiniCPM4-0.5B--modeling_minicpm.MiniCPMForCausalLM",
+     "AutoModelForSequenceClassification": "openbmb/MiniCPM4-0.5B--modeling_minicpm.MiniCPMForSequenceClassification"
+   },
+   "bos_token_id": 1,
+   "dim_model_base": 256,
+   "eos_token_id": [
+     2,
+     73440
+   ],
+   "hidden_act": "silu",
+   "hidden_size": 1024,
+   "initializer_range": 0.1,
+   "intermediate_size": 4096,
+   "max_position_embeddings": 32768,
+   "model_type": "minicpm",
+   "mup_denominator": null,
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 2,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "long_factor": [
+       1.0004360675811768,
+       1.0668443441390991,
+       1.1631425619125366,
+       1.3025742769241333,
+       1.5040205717086792,
+       1.7941505908966064,
+       2.2101221084594727,
+       2.802666664123535,
+       3.6389970779418945,
+       4.804192543029785,
+       6.39855432510376,
+       8.527148246765137,
+       11.277542114257812,
+       14.684998512268066,
+       18.69317054748535,
+       23.13019371032715,
+       27.72362518310547,
+       32.1606559753418,
+       36.168827056884766,
+       39.57627868652344,
+       42.32667541503906,
+       44.45526885986328,
+       46.04962921142578,
+       47.21482849121094,
+       48.05115509033203,
+       48.64370346069336,
+       49.05967712402344,
+       49.34980392456055,
+       49.551246643066406,
+       49.69068145751953,
+       49.78697967529297,
+       49.85338592529297
+     ],
+     "original_max_position_embeddings": 32768,
+     "rope_type": "longrope",
+     "short_factor": [
+       1.0004360675811768,
+       1.0668443441390991,
+       1.1631425619125366,
+       1.3025742769241333,
+       1.5040205717086792,
+       1.7941505908966064,
+       2.2101221084594727,
+       2.802666664123535,
+       3.6389970779418945,
+       4.804192543029785,
+       6.39855432510376,
+       8.527148246765137,
+       11.277542114257812,
+       14.684998512268066,
+       18.69317054748535,
+       23.13019371032715,
+       27.72362518310547,
+       32.1606559753418,
+       36.168827056884766,
+       39.57627868652344,
+       42.32667541503906,
+       44.45526885986328,
+       46.04962921142578,
+       47.21482849121094,
+       48.05115509033203,
+       48.64370346069336,
+       49.05967712402344,
+       49.34980392456055,
+       49.551246643066406,
+       49.69068145751953,
+       49.78697967529297,
+       49.85338592529297
+     ]
+   },
+   "rope_theta": 10000.0,
+   "scale_depth": 1.4,
+   "scale_emb": 12,
+   "sparse_config": null,
+   "torch_dtype": "float16",
+   "transformers_version": "4.52.4",
+   "use_cache": true,
+   "vocab_size": 73448
+ }
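Because `auto_map` points back at the modeling code hosted in `openbmb/MiniCPM4-0.5B`, loading this checkpoint requires `trust_remote_code=True`. A minimal loading sketch, with the repo id again a placeholder:

```python
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "rohitnagareddy/MiniCPM4-0.5B-qlora",  # placeholder id
    torch_dtype=torch.float16,             # matches "torch_dtype": "float16"
    trust_remote_code=True,                # auto_map resolves to openbmb's MiniCPM code
)
```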
generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token_id": 1,
+   "do_sample": true,
+   "eos_token_id": [
+     2,
+     73440
+   ],
+   "pad_token_id": 2,
+   "temperature": 0.8,
+   "top_p": 0.8,
+   "transformers_version": "4.52.4"
+ }
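These defaults (sampling enabled, temperature 0.8, top-p 0.8, two EOS ids) are picked up by `generate()` automatically; a sketch of the explicit equivalent, continuing from the `tok`, `model`, and `prompt` of the earlier sketches:

```python
inputs = tok(prompt, return_tensors="pt")
out = model.generate(
    **inputs,
    do_sample=True,
    temperature=0.8,
    top_p=0.8,
    eos_token_id=[2, 73440],  # stops on either </s> or <|im_end|>
    pad_token_id=2,
    max_new_tokens=256,
)
# Decode only the newly generated tokens.
print(tok.decode(out[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```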
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3388588df8311102dd7b8403ab4d5b52ed8a640958c755d1eab87ebb62f9adb2
+ size 867772432
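The weights live in Git LFS; the pointer above records only the SHA-256 and byte size. A minimal sketch for verifying a local copy against the pointer (the local filename is an assumption):

```python
import hashlib
import os

expected_oid = "3388588df8311102dd7b8403ab4d5b52ed8a640958c755d1eab87ebb62f9adb2"
expected_size = 867772432

path = "model.safetensors"  # assumed local download location
assert os.path.getsize(path) == expected_size, "size mismatch: did git lfs pull run?"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == expected_oid, "checksum mismatch against LFS pointer"
```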
special_tokens_map.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "additional_special_tokens": [
+     "<|im_end|>",
+     "<|im_start|>",
+     "<|tool_call|>",
+     "<|execute_start|>",
+     "<|execute_end|>",
+     "<|fim_prefix|>",
+     "<|fim_middle|>",
+     "<|fim_suffix|>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|im_end|>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
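Note that `pad_token` is the same string as `eos_token` (`<|im_end|>`), so padded batches fill with the EOS id and correctness depends on the attention mask, not the token values. A short sketch, continuing with the `tok` from above:

```python
batch = tok(["short", "a somewhat longer prompt"], padding=True, return_tensors="pt")
# Padding positions carry id 73440 (<|im_end|>) but are zeroed in attention_mask,
# so the model ignores them despite pad == eos.
print(batch["input_ids"])
print(batch["attention_mask"])
```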
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb74d51116831c3bf65db812c553f94ab0c88dcf97a5bbb37e3504f6d359c530
+ size 1181204
tokenizer_config.json ADDED
@@ -0,0 +1,117 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "73440": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "73441": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "73442": {
+       "content": "<|tool_call|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "73443": {
+       "content": "<|execute_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "73444": {
+       "content": "<|execute_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "73445": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "73446": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "73447": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_end|>",
+     "<|im_start|>",
+     "<|tool_call|>",
+     "<|execute_start|>",
+     "<|execute_end|>",
+     "<|fim_prefix|>",
+     "<|fim_middle|>",
+     "<|fim_suffix|>"
+   ],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "extra_special_tokens": {},
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<|im_end|>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
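With `add_bos_token: true` and `add_eos_token: false`, plain encoding prepends `<s>` (id 1) and appends nothing; `<|im_end|>` appears only where the chat template inserts it. Continuing the earlier sketch:

```python
ids = tok("hello").input_ids
assert ids[0] == 1        # <s> prepended (add_bos_token: true)
assert ids[-1] != 73440   # no <|im_end|> appended (add_eos_token: false)
```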