diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..35d504ef292a64a23eeaeb1aa83a14008b3624da 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,153 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1020/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1040/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1060/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1080/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1120/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1140/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1160/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1180/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_120/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1220/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1240/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1260/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1280/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1320/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1340/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1360/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1380/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_140/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1420/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1440/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1460/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1480/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1520/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1540/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1560/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1580/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_160/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1620/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1640/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1660/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1680/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1720/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1740/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1760/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1780/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_180/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1820/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1840/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1860/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1880/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1920/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1940/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1960/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_1980/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_20/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2020/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2040/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2060/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2080/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2100/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2120/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2140/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2160/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2180/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_220/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2200/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2220/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2240/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2260/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2280/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2320/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2340/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2360/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2380/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_240/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2420/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2440/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2460/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2480/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2520/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2540/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2560/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2580/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_260/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2620/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2640/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2660/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2680/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2720/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2740/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2760/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2780/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_280/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2820/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2840/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2860/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2880/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2920/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2940/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2960/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_2980/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_300/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_3000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_320/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_340/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_360/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_380/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_40/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_400/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_420/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_440/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_460/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_480/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_520/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_540/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_560/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_580/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_60/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_600/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_620/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_640/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_660/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_680/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_700/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_720/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_740/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_760/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_780/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_80/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_800/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_820/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_840/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_860/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_880/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_900/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_920/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_940/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_960/tokenizer.json filter=lfs diff=lfs merge=lfs -text
+deepscaler-1.5b-8k/actor/global_step_980/tokenizer.json filter=lfs diff=lfs merge=lfs -text
diff --git a/deepscaler-1.5b-8k/actor/global_step_100/config.json b/deepscaler-1.5b-8k/actor/global_step_100/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_100/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_100/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_100/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_100/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_100/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_100/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..99d598803b6934583c214c821c71b61fd81b00b3
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_100/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91d1a619a92a6a6b9c0170be2c1d5e302884d0cd5441eac9391b9c3fb39d6aa8
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_100/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_100/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3a5ff5ebb55375582e8c3cf65d39e0d358497831
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_100/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73f4df56ccfe85d868610f7cb50ba9779d4869f6ac1547c8f3992fafc3d850bf
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_100/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_100/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_100/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_100/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_100/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_100/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_100/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_100/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_100/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_100/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_100/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_100/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1000/config.json b/deepscaler-1.5b-8k/actor/global_step_1000/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1000/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1000/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1000/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1000/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1000/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1000/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..37272362f2a920797e5d870c463e9193faddc39f
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1000/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a574322ac101a3ac66fcb8cf1ccfb07d1a0bd6f1069d08f7610979e71e99fff8
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1000/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1000/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..918617dbeaaa5d857acd9d3c22cc10de1cc0d093
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1000/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ff751b1c83ec3b7c144aeb62b254e901df8a7a5518ecccc0c7f5f4274870e23
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1000/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1000/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1000/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1000/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1000/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1000/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1000/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1000/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1000/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1000/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1020/config.json b/deepscaler-1.5b-8k/actor/global_step_1020/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1020/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1020/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1020/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1020/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1020/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1020/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c1dabb11ddaa20ec3c9dc2fda25d7bdbd7e9f637
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1020/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06f5084e22d07b991c2350e0275f6c9fc7f9ba3f95b89a6d815b76bf2ba6bce0
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1020/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1020/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..21e89a1505969aebabf8c7d0514e120851e21037
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1020/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a04fec425fa3599990ba30cad1965dc84eebe7fce45b809645024c91125ee0d9
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1020/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1020/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1020/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
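
The index above maps every tensor in the checkpoint to one of its two safetensors shards. As a minimal sketch of how such a sharded checkpoint is consumed (assuming the repository has been cloned and the LFS blobs fetched, e.g. with git lfs pull), transformers resolves the shards through model.safetensors.index.json automatically:

    # Minimal sketch: load one actor checkpoint directory with transformers.
    # The shard files themselves are Git LFS objects, so they must be pulled first;
    # from_pretrained() then reads model.safetensors.index.json to locate each tensor.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    ckpt = "deepscaler-1.5b-8k/actor/global_step_1020"  # any global_step_* directory
    model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype="auto")
    tokenizer = AutoTokenizer.from_pretrained(ckpt)
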
diff --git a/deepscaler-1.5b-8k/actor/global_step_1020/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1020/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1020/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1020/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1020/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1020/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1020/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1020/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1020/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
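
The chat_template above is the DeepSeek-R1 Jinja template: it strips everything up to the closing </think> tag from prior assistant turns and, when add_generation_prompt is set, opens the reply with <|Assistant|><think>\n. A minimal sketch of rendering it, assuming the same local checkpoint directory as above:

    # Minimal sketch: render the chat template defined in this tokenizer_config.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1020")
    messages = [{"role": "user", "content": "What is 7 * 8?"}]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    # Expected shape of the output:
    # <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>\n
    print(prompt)
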
diff --git a/deepscaler-1.5b-8k/actor/global_step_1040/config.json b/deepscaler-1.5b-8k/actor/global_step_1040/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1040/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
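
For reference, the config above is the unchanged DeepSeek-R1-Distill-Qwen-1.5B architecture (Qwen2, 28 layers, hidden size 1536, GQA with 12 query and 2 key-value heads); only the weights differ between steps. A quick way to confirm this from a local checkout:

    # Minimal sketch: inspect the architecture recorded in config.json.
    from transformers import AutoConfig

    cfg = AutoConfig.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1040")
    print(cfg.model_type, cfg.num_hidden_layers, cfg.hidden_size,
          cfg.num_attention_heads, cfg.num_key_value_heads)  # qwen2 28 1536 12 2
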
diff --git a/deepscaler-1.5b-8k/actor/global_step_1040/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1040/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1040/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
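
These defaults (do_sample with temperature 0.6 and top_p 0.95) match DeepSeek's recommended sampling settings for the R1 distills, and generate() picks them up automatically when the checkpoint is loaded. A minimal sketch, assuming the checkpoint is available locally:

    # Minimal sketch: generation_config.json is loaded alongside the model, so
    # generate() samples with temperature=0.6 / top_p=0.95 without extra arguments.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    ckpt = "deepscaler-1.5b-8k/actor/global_step_1040"
    tokenizer = AutoTokenizer.from_pretrained(ckpt)
    model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)

    input_ids = tokenizer.apply_chat_template(
        [{"role": "user", "content": "Prove that the sum of two even integers is even."}],
        add_generation_prompt=True,
        return_tensors="pt",
    )
    out = model.generate(input_ids, max_new_tokens=512)
    print(tokenizer.decode(out[0][input_ids.shape[-1]:], skip_special_tokens=True))
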
diff --git a/deepscaler-1.5b-8k/actor/global_step_1040/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1040/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f37d4d20cef3930fee4ba552422711c3cad1c4dc
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1040/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fee44e4f7b49092a8dffd374ac5ea69b7c20bc0d636c721a74837007fb42efa
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1040/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1040/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d729b9356c1a68f135d75f5628bad3798047859e
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1040/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c4125ac120f48d113a4c979d6ac591dc8b10839c1d6bb3b0f941316c5379b21
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1040/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1040/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1040/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1040/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1040/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1040/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1040/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1040/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1040/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1040/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1040/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1040/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1060/config.json b/deepscaler-1.5b-8k/actor/global_step_1060/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1060/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1060/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1060/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1060/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1060/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1060/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e28fa2af2871d14ff15d7d458423e7d63de95e87
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1060/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8e8e5ca7115a493ada94125e3aa356c435fc636205c62a747d1d7038a7a7292
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1060/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1060/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a27e932b2f507113aece94d87fa390ce908be3cb
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1060/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4682093ac6789553dc0e6f95bf66fcc2d064b949e2a70b697ec84874e0ab4da
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1060/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1060/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1060/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1060/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1060/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1060/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1060/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1060/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1060/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1060/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1060/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1060/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1080/config.json b/deepscaler-1.5b-8k/actor/global_step_1080/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1080/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1080/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1080/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1080/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1080/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1080/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..66aa5f086d69eb4dd026b908e104ce71a27f1dad
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1080/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dee58d0545f82cfa0e52b073364cfbd0a83c253de5a1a27c7bdccf5ce9c52d28
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1080/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1080/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2a9a62dc89d6aa550df490a98629745632669eb1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1080/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1b19765e7e99f98157dfedcce5c12f02a196960110e9aa5c860ef6a64718755
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1080/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1080/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1080/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
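
The index above is what lets loaders resolve each parameter lazily: `weight_map` maps every tensor name to the shard file that stores it, and `metadata.total_size` records the summed tensor bytes. A minimal sketch of that lookup, assuming the `safetensors` package and a locally checked-out copy of this checkpoint (the path is illustrative, not part of the repo's tooling):

```python
# Resolve one tensor through model.safetensors.index.json and read only it.
import json
from pathlib import Path

from safetensors import safe_open

ckpt = Path("deepscaler-1.5b-8k/actor/global_step_1080")
index = json.loads((ckpt / "model.safetensors.index.json").read_text())

name = "model.layers.27.self_attn.v_proj.weight"
shard = index["weight_map"][name]  # "model-00002-of-00002.safetensors"

with safe_open(str(ckpt / shard), framework="pt") as f:
    tensor = f.get_tensor(name)  # reads just this tensor, not the whole shard
print(name, tuple(tensor.shape), tensor.dtype)
```

A name-level map is needed because shard boundaries fall mid-layer: in this checkpoint, layer 21's attention projections sit in shard 1 while its layernorms and `mlp.down_proj` land in shard 2.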
diff --git a/deepscaler-1.5b-8k/actor/global_step_1080/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1080/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1080/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
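
The map above reuses `<|end▁of▁sentence|>` as the pad token, a common choice when a model ships without a dedicated padding token. A quick check, assuming the checkpoint directory is available locally:

```python
# Pad and eos resolve to the same token, so padded positions share id 151643.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1080")
assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
print(tok.bos_token_id, tok.eos_token_id, tok.pad_token_id)  # 151646 151643 151643
```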
diff --git a/deepscaler-1.5b-8k/actor/global_step_1080/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1080/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1080/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
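
Files matched by the `.gitattributes` LFS rules are committed as three-line pointers like the one above rather than as blobs: the spec version, the SHA-256 object id, and the byte size. A sketch of verifying a downloaded object against its pointer (file names here are illustrative):

```python
# Check a fetched blob against its Git LFS pointer
# (spec: https://git-lfs.github.com/spec/v1).
import hashlib
from pathlib import Path

def parse_pointer(text: str) -> dict:
    # Each pointer line is "key value"; the oid value carries a "sha256:" prefix.
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

ptr = parse_pointer(Path("tokenizer.json.pointer").read_text())
blob = Path("tokenizer.json").read_bytes()

assert len(blob) == int(ptr["size"])
assert hashlib.sha256(blob).hexdigest() == ptr["oid"].split(":", 1)[1]
print("LFS object verified")
```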
diff --git a/deepscaler-1.5b-8k/actor/global_step_1080/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1080/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1080/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
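
The `chat_template` above is a Jinja program that renders a message list into the `<|User|>`/`<|Assistant|>` format, dropping everything up to `</think>` from prior assistant turns so only final answers stay in the history. A sketch of rendering a prompt with it, assuming a local copy of the checkpoint:

```python
# Render one user turn through the checkpoint's chat template.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1080")
messages = [{"role": "user", "content": "What is 7 * 8?"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|>...
```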
diff --git a/deepscaler-1.5b-8k/actor/global_step_1100/config.json b/deepscaler-1.5b-8k/actor/global_step_1100/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1100/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
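
The architecture fields above fully determine the parameter count, and it agrees with the safetensors index metadata: 7,108,352,000 bytes of float32 is exactly 1,777,088,000 parameters. A back-of-the-envelope check:

```python
# Parameter count implied by config.json, matching total_size / 4 bytes.
h, inter, layers, vocab = 1536, 8960, 28, 151936
heads, kv_heads = 12, 2
kv = kv_heads * (h // heads)                    # GQA: 2 kv heads of dim 128

attn = (h * h + h) + 2 * (kv * h + kv) + h * h  # q (w+b), k/v (w+b), o (w)
mlp = 3 * h * inter                             # gate, up, down projections
norms = 2 * h                                   # input + post-attn RMSNorm

total = layers * (attn + mlp + norms) + 2 * vocab * h + h  # + embed, untied lm_head, final norm
assert total == 1_777_088_000 == 7_108_352_000 // 4
```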
diff --git a/deepscaler-1.5b-8k/actor/global_step_1100/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1100/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1100/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
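
`generation_config.json` stores the defaults `generate()` picks up when no overrides are passed: sampling at temperature 0.6 with top-p 0.95, the settings DeepSeek recommends for the distilled base model. A sketch of running the checkpoint with those defaults (path illustrative; `torch` and `transformers` 4.47, matching the config, assumed):

```python
# Sample with the stored defaults (do_sample=True, temperature=0.6, top_p=0.95).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_1100"
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16)

input_ids = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 17 * 23."}],
    add_generation_prompt=True, return_tensors="pt")
out = model.generate(input_ids, max_new_tokens=256)  # generation_config defaults apply
print(tok.decode(out[0], skip_special_tokens=True))
```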
diff --git a/deepscaler-1.5b-8k/actor/global_step_1100/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1100/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..94e2f46e802c566be7c6cf9599cd416cb468289d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1100/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53865407d1b14c8a8909a15e5ab539d4c4802cc46492deb9d0281461ed46d916
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1100/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1100/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b3b636c6d335149e7dba282e1d344c2a1760acf5
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1100/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59ed8a5c0f326625bfd782286295b0c103ccd95bef98c385881dd3898fcbe98c
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1100/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1100/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1100/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1100/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1100/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1100/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1100/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1100/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1100/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1100/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1100/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1100/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1120/config.json b/deepscaler-1.5b-8k/actor/global_step_1120/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1120/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1120/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1120/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1120/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1120/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1120/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f6f0ba79fe00db0775d52e4e876cc47af7856a91
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1120/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4988077630246c7d4c3fafdff5f509c0364c5973ca6eb69c53746f16ecd7bfb1
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1120/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1120/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..473a180558a27e0583e513c48f9ee5309d341594
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1120/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a49e72c028954964a10a78e9af0dc7a43e5407e60c1a9afe232a4b6f4b8dd80e
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1120/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1120/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1120/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
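
The `weight_map` above is what lets loaders open only the shard that actually holds a given tensor. A minimal sketch of that lookup, assuming the checkpoint directory has been downloaded locally (the `ckpt_dir` path is illustrative):

```python
import json
from pathlib import Path

from safetensors import safe_open  # pip install safetensors

# Illustrative path: point this at a locally downloaded checkpoint directory.
ckpt_dir = Path("deepscaler-1.5b-8k/actor/global_step_1120")

# The index maps every tensor name to the shard file that stores it.
index = json.loads((ckpt_dir / "model.safetensors.index.json").read_text())

name = "model.layers.21.mlp.down_proj.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00002.safetensors"

# safe_open memory-maps the shard, so only the requested tensor is read.
with safe_open(str(ckpt_dir / shard), framework="pt") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape), "from", shard)
```

Note that layer 21 straddles the shard boundary in the index above: its `input_layernorm`, `post_attention_layernorm`, and `down_proj` land in shard 2 while the rest of the layer stays in shard 1. That is normal for Hugging Face sharding, which fills shards tensor by tensor up to a size budget rather than layer by layer.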
diff --git a/deepscaler-1.5b-8k/actor/global_step_1120/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1120/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1120/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
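
In this map the `pad_token` is simply the `eos_token`, a common choice for decoder-only models that never saw a dedicated padding token in pretraining. A quick check with `transformers`, assuming the checkpoint directory is available locally (path illustrative):

```python
from transformers import AutoTokenizer

# Illustrative local path to one of the checkpoint directories.
tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1120")

# eos doubles as pad, so batched decoding must rely on attention masks
# rather than on a distinct padding id.
assert tok.pad_token == tok.eos_token == "<|begin▁of▁sentence|>".replace("begin", "end")
print(tok.pad_token_id, tok.eos_token_id)  # both 151643 per the tokenizer config
```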
diff --git a/deepscaler-1.5b-8k/actor/global_step_1120/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1120/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1120/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
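
`tokenizer.json` itself is checked in as a Git LFS pointer: three `key value` lines carrying the spec version, the blob's SHA-256, and its size in bytes. A minimal stdlib sketch that verifies a downloaded blob against its pointer (file names illustrative):

```python
import hashlib
from pathlib import Path

def check_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Verify a downloaded blob against its Git LFS pointer file."""
    fields = dict(
        line.split(" ", 1) for line in Path(pointer_path).read_text().splitlines()
    )
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return (
        len(data) == expected_size
        and hashlib.sha256(data).hexdigest() == expected_oid
    )

# e.g. check_lfs_pointer("tokenizer.json.pointer", "tokenizer.json")
```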
diff --git a/deepscaler-1.5b-8k/actor/global_step_1120/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1120/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1120/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
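
The `chat_template` above is the DeepSeek-R1 conversation format. One detail worth noticing: when a past assistant message is re-serialized, everything up to `</think>` is split off, so earlier chain-of-thought text is not fed back into the next prompt, and with `add_generation_prompt=True` the template ends by opening a fresh `<think>` block. A short usage sketch (local path illustrative):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1120")

messages = [{"role": "user", "content": "What is 7 * 8?"}]
prompt = tok.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=False
)
print(prompt)
# <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>\n
```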
diff --git a/deepscaler-1.5b-8k/actor/global_step_1140/config.json b/deepscaler-1.5b-8k/actor/global_step_1140/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1140/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
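
This config pins down the parameter count exactly, and it reconciles with the `total_size` of 7,108,352,000 bytes recorded in the shard indices: 1,777,088,000 parameters stored in `float32` at 4 bytes each. A worked check of that arithmetic (Qwen2 layout, untied embeddings, biases on q/k/v only):

```python
# Parameter count implied by the config above.
hidden, inter, layers, vocab = 1536, 8960, 28, 151936
heads, kv_heads, head_dim = 12, 2, 128  # head_dim = hidden // heads

q = hidden * heads * head_dim + heads * head_dim               # q_proj + bias
kv = 2 * (hidden * kv_heads * head_dim + kv_heads * head_dim)  # k/v_proj + biases
o = heads * head_dim * hidden                                  # o_proj, no bias
mlp = 3 * hidden * inter                                       # gate/up/down
norms = 2 * hidden                                             # two RMSNorms per layer

per_layer = q + kv + o + mlp + norms
# + input embeddings, untied lm_head, and the final norm
total = layers * per_layer + 2 * vocab * hidden + hidden
assert total == 1_777_088_000
assert total * 4 == 7_108_352_000  # float32 bytes == index "total_size"
```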
diff --git a/deepscaler-1.5b-8k/actor/global_step_1140/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1140/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1140/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
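
The decoding defaults here (sampling with temperature 0.6 and top-p 0.95) follow the DeepSeek-R1 recommendation, and `generate()` picks them up automatically from the checkpoint directory. A minimal sketch, assuming the checkpoint is available locally (path illustrative):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_1140"  # illustrative local path
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt)

inputs = tok("What is 2 + 2?", return_tensors="pt")
# do_sample=True, temperature=0.6, top_p=0.95 come from generation_config.json.
out = model.generate(**inputs, max_new_tokens=64)
print(tok.decode(out[0], skip_special_tokens=True))
```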
diff --git a/deepscaler-1.5b-8k/actor/global_step_1140/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1140/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ddea7ceb89e7b48de6fa6df2cbe6360a15780648
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1140/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c91f9a13ce2f5db48045c370fa02ddf442c7786cc0027ab235667c5f781fc53
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1140/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1140/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7de0351b0e9bd26487193f510972b59ef74066f8
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1140/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b19a2bf875f0942a6b0759a5e4705f3bff4b177d420cf451d2b12efd824d4339
+size 2111719976
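
The two shards add up to 4,996,670,464 + 2,111,719,976 = 7,108,390,440 bytes, 38,440 bytes more than the index's `total_size`. The gap is expected: `total_size` counts tensor data only, while each safetensors file additionally stores an 8-byte length prefix plus a JSON header describing its tensors. A quick check:

```python
shard_files = 4_996_670_464 + 2_111_719_976  # byte sizes from the LFS pointers
tensor_bytes = 7_108_352_000                 # "total_size" in the index below
print(shard_files - tensor_bytes)            # 38440 bytes of safetensors headers
```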
diff --git a/deepscaler-1.5b-8k/actor/global_step_1140/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1140/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1140/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
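
Aside: model.safetensors.index.json is the shard directory for a checkpoint — metadata.total_size is the byte count of all tensor data, and weight_map names the shard that stores each tensor, so a loader can open only the file it needs. A minimal lookup sketch, assuming the standard json and safetensors packages and the directory layout added in this diff:

    import json
    from pathlib import Path
    from safetensors import safe_open

    # Checkpoint directory added by this diff.
    ckpt = Path("deepscaler-1.5b-8k/actor/global_step_1140")

    # The index maps each tensor name to the shard file that stores it.
    index = json.loads((ckpt / "model.safetensors.index.json").read_text())
    weight_map = index["weight_map"]

    name = "model.layers.27.mlp.down_proj.weight"
    shard = weight_map[name]  # "model-00002-of-00002.safetensors"

    # Open only that shard and read the single tensor.
    with safe_open(str(ckpt / shard), framework="pt", device="cpu") as f:
        tensor = f.get_tensor(name)
    print(name, tuple(tensor.shape))
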
diff --git a/deepscaler-1.5b-8k/actor/global_step_1140/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1140/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1140/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
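
Aside: pad_token here reuses <|end▁of▁sentence|> — the model ships no dedicated padding token, so padding and EOS share id 151643 (see the config below). A one-line check, assuming a transformers load of this directory:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1140")
    # special_tokens_map.json points pad at the EOS string, so the ids match.
    assert tok.pad_token_id == tok.eos_token_id == 151643
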
diff --git a/deepscaler-1.5b-8k/actor/global_step_1140/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1140/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1140/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
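
Aside: this three-line file is a Git LFS pointer, not the tokenizer itself — the real ~11 MB tokenizer.json is fetched from LFS storage by its sha256 oid at checkout (hence the .gitattributes rules at the top of this diff). A small parser sketch for the pointer format (the helper name is illustrative):

    def parse_lfs_pointer(text: str) -> dict:
        """Parse a Git LFS pointer file into its key/value fields."""
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        return {
            "version": fields["version"],
            "oid": fields["oid"].removeprefix("sha256:"),
            "size": int(fields["size"]),
        }

    pointer = """version https://git-lfs.github.com/spec/v1
    oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
    size 11422778"""
    info = parse_lfs_pointer(pointer)
    assert info["size"] == 11_422_778  # the real tokenizer.json is ~11 MB
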
diff --git a/deepscaler-1.5b-8k/actor/global_step_1140/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1140/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1140/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
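
Aside: the chat_template above keeps only what follows the final </think> in earlier assistant turns, and with add_generation_prompt it ends the prompt with <|Assistant|><think>\n so generation starts inside a reasoning block. A minimal rendering sketch, assuming the transformers tokenizer is loaded from the checkpoint directory added in this diff:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1140")

    messages = [
        {"role": "system", "content": "You are a careful math assistant."},
        {"role": "user", "content": "What is 12 * 13?"},
    ]
    # add_generation_prompt=True appends "<|Assistant|><think>\n" so the
    # model begins by emitting its reasoning trace.
    prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    print(prompt)
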
diff --git a/deepscaler-1.5b-8k/actor/global_step_1160/config.json b/deepscaler-1.5b-8k/actor/global_step_1160/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1160/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
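
Aside: this config pins down every tensor shape, so the index's total_size can be checked by arithmetic. A Qwen2 block has biased q/k/v projections (GQA: 12 query heads over 2 KV heads of dim 128), an unbiased o_proj, a three-matrix SiLU MLP, and two RMSNorm vectors; with untied embeddings and float32 (4 bytes per value) this reproduces 7108352000 exactly. A self-contained check:

    # Parameter count implied by config.json, assuming standard Qwen2 shapes.
    h, inter, layers, vocab = 1536, 8960, 28, 151936
    heads, kv_heads = 12, 2
    head_dim = h // heads          # 128
    kv = kv_heads * head_dim       # 256

    attn = (h * h + h) + 2 * (kv * h + kv) + h * h   # q (w+b), k/v (w+b), o (w)
    mlp = 3 * h * inter                              # gate, up, down
    norms = 2 * h                                    # input + post-attention RMSNorm
    per_layer = attn + mlp + norms

    total = layers * per_layer + 2 * vocab * h + h   # + embed, lm_head, final norm
    assert total * 4 == 7_108_352_000                # total_size in the index metadata
    print(f"{total:,} parameters")                   # 1,777,088,000
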
diff --git a/deepscaler-1.5b-8k/actor/global_step_1160/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1160/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1160/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
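
Aside: generation_config.json supplies the sampling defaults (do_sample with temperature 0.6 and top_p 0.95) that transformers' generate() reads from the checkpoint directory. A small end-to-end sketch under that assumption; the prompt and dtype here are chosen purely for illustration:

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    path = "deepscaler-1.5b-8k/actor/global_step_1160"  # any step directory in this diff
    tok = AutoTokenizer.from_pretrained(path)
    model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16)

    inputs = tok("What is 12 * 13?", return_tensors="pt")
    # do_sample / temperature / top_p are picked up from generation_config.json.
    out = model.generate(**inputs, max_new_tokens=256)
    print(tok.decode(out[0], skip_special_tokens=True))
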
diff --git a/deepscaler-1.5b-8k/actor/global_step_1160/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1160/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e5f4b5be0fccc1bf5c285087bf02cc253ae31c10
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1160/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f5190da7009b1001b9b291e0f5a1d9aecc3eafcae9e1f4afe335acc3b436d16
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1160/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1160/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..32e8efce20f25532f925c10e08ca3d4d98b23467
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1160/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8c505dfcd69aa0f74840db3b4cc37efbf34dfd7efae93caa909235908fec716
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1160/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1160/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1160/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1160/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1160/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1160/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1160/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1160/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1160/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1160/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1160/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1160/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1180/config.json b/deepscaler-1.5b-8k/actor/global_step_1180/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1180/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1180/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1180/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1180/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1180/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1180/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7cb92289e440f17a11f6b8271abc6a4c9c1d265b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1180/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98f9822baecf0c7f0a5578bab4ca1c6a385bc634a1fe5b5435d1d5c8cb9ac581
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1180/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1180/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..545ff8e9b5e586c2ba81b3d92b446d6de45d5d29
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1180/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e39dd63c3f365c709cdc8af7c2af81a227267d13305a4cf6ed5aeccb75e2e767
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1180/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1180/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1180/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
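
The `weight_map` above is the shard index that `transformers` and `safetensors` consult to locate each tensor: every entry points a parameter name at one of the two shard files, with only layer 21 straddling the boundary. A minimal sketch of reading a single tensor through the index (assumes `safetensors` and `torch` are installed; the directory path is illustrative, any local `global_step_*` folder works):

```python
import json
from pathlib import Path

from safetensors import safe_open

# Illustrative path; any of the global_step_* directories in this diff works.
ckpt_dir = Path("deepscaler-1.5b-8k/actor/global_step_120")
index = json.loads((ckpt_dir / "model.safetensors.index.json").read_text())

weight_map = index["weight_map"]  # tensor name -> shard file name
shards = set(weight_map.values())
print(index["metadata"]["total_size"], "bytes across", len(shards), "shards")

# Read one tensor without loading the whole shard into memory.
name = "model.norm.weight"
with safe_open(str(ckpt_dir / weight_map[name]), framework="pt") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape))
```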
diff --git a/deepscaler-1.5b-8k/actor/global_step_1180/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1180/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1180/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1180/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1180/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1180/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
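
The three-line stanza above is a Git LFS pointer, not the tokenizer itself; the actual ~11 MB `tokenizer.json` is materialized by `git lfs pull`. A small self-contained parser, handy for verifying that a clone fetched the large files rather than leaving pointers behind:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "algo": algo,
            "digest": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
size 11422778"""

info = parse_lfs_pointer(pointer)
assert info["algo"] == "sha256" and info["size"] == 11422778
```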
diff --git a/deepscaler-1.5b-8k/actor/global_step_1180/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1180/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1180/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
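
The `chat_template` above is what `AutoTokenizer.apply_chat_template` renders: prior assistant turns are stripped back to the text after `</think>`, and with `add_generation_prompt=True` the prompt ends in `<|Assistant|><think>\n` so the model begins its reply inside a reasoning block. A sketch, assuming `transformers` is installed and the checkpoint directory is local:

```python
from transformers import AutoTokenizer

# Illustrative local path taken from this diff.
tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1180")

messages = [{"role": "user", "content": "What is 7 * 8?"}]
prompt = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # ends with "<|Assistant|><think>\n"
```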
diff --git a/deepscaler-1.5b-8k/actor/global_step_120/config.json b/deepscaler-1.5b-8k/actor/global_step_120/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_120/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
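
These numbers pin down the DeepSeek-R1-Distill-Qwen-1.5B geometry: 28 layers, hidden size 1536 over 12 attention heads (head dim 128), and grouped-query attention with 2 KV heads, i.e. 6 query heads sharing each KV head. A quick sanity check via `AutoConfig` (a sketch; the path is illustrative):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("deepscaler-1.5b-8k/actor/global_step_120")

head_dim = cfg.hidden_size // cfg.num_attention_heads  # 1536 // 12 = 128
assert head_dim * cfg.num_attention_heads == cfg.hidden_size
assert cfg.num_attention_heads % cfg.num_key_value_heads == 0
print("GQA group size:", cfg.num_attention_heads // cfg.num_key_value_heads)  # 6
```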
diff --git a/deepscaler-1.5b-8k/actor/global_step_120/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_120/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_120/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
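
With `"do_sample": true`, `generate()` picks these defaults up from `generation_config.json` automatically, sampling at temperature 0.6 and top-p 0.95. A minimal end-to-end sketch (assumes `torch` and `transformers`, plus enough memory for the ~7 GB float32 checkpoint; loading in bfloat16 here is a choice, not part of the config):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_120"  # illustrative local path
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)

input_ids = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 12 + 30."}],
    add_generation_prompt=True, return_tensors="pt")
out = model.generate(input_ids, max_new_tokens=256)  # samples at T=0.6, top_p=0.95
print(tok.decode(out[0][input_ids.shape[-1]:], skip_special_tokens=True))
```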
diff --git a/deepscaler-1.5b-8k/actor/global_step_120/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_120/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b487d8d94d0f896c139060e0f395be9b96d3fa9a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_120/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f91c1e010a4e197d207f98e28fd3c694b84ed74f008b93bbc7100e6a42872940
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_120/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_120/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..babcc53c0ad50dc0195a126f67d3fbbbae794651
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_120/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c66e7fd357fbc6ca2a5c4472452f0527b1a8d4a7004061a542285d7601c4ceb6
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_120/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_120/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_120/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
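
A loader uses the `weight_map` above to find which shard holds each tensor, so only the needed file has to be opened. Below is a minimal sketch of resolving a single tensor by hand; the checkpoint path is one of the directories added in this diff, and `safetensors` plus `torch` are assumed to be installed (an illustration, not part of the checkpoint itself):

```python
import json
import os

from safetensors import safe_open

# One of the checkpoint directories added in this diff.
checkpoint_dir = "deepscaler-1.5b-8k/actor/global_step_120"

# The index maps every tensor name to the shard file containing it.
with open(os.path.join(checkpoint_dir, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "model.layers.3.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # "model-00001-of-00002.safetensors"

# Open only that shard and read the single tensor lazily.
with safe_open(os.path.join(checkpoint_dir, shard), framework="pt") as f:
    tensor = f.get_tensor(name)

print(name, tuple(tensor.shape))  # (1536, 1536) given hidden_size=1536
```
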
diff --git a/deepscaler-1.5b-8k/actor/global_step_120/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_120/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_120/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_120/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_120/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_120/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_120/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_120/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_120/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
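
The `chat_template` above is the DeepSeek-R1 conversation format: it places the system prompt after `bos_token`, wraps turns in `<|User|>`/`<|Assistant|>` markers, and drops everything up to a closing `</think>` tag from earlier assistant turns so prior reasoning is not re-fed to the model. A hedged sketch of rendering a prompt with it through `transformers` (the path is the directory added by this diff):

```python
from transformers import AutoTokenizer

# Loads tokenizer_config.json, tokenizer.json, and special_tokens_map.json
# from the checkpoint directory added in this diff.
tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_120")

messages = [
    {"role": "system", "content": "You are a helpful math assistant."},
    {"role": "user", "content": "What is 12 * 13?"},
]

# add_generation_prompt=True appends the trailing '<|Assistant|>\n' so the
# model continues as the assistant; tokenize=False returns the raw string.
prompt = tok.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)
```
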
diff --git a/deepscaler-1.5b-8k/actor/global_step_1200/config.json b/deepscaler-1.5b-8k/actor/global_step_1200/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1200/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
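
The `total_size` of 7,108,352,000 bytes recorded in this checkpoint's safetensors index follows directly from the config above at float32 (4 bytes per parameter). A quick arithmetic sketch, assuming the standard Qwen2 layout in which only the q/k/v projections carry biases (which the weight maps in this diff confirm):

```python
# Parameter count implied by config.json for this checkpoint.
hidden, inter, layers, vocab = 1536, 8960, 28, 151936
heads, kv_heads = 12, 2
head_dim = hidden // heads      # 128
kv_dim = kv_heads * head_dim    # 256 (grouped-query attention)

attn = (hidden * hidden + hidden            # q_proj weight + bias
        + 2 * (hidden * kv_dim + kv_dim)    # k_proj, v_proj weight + bias
        + hidden * hidden)                  # o_proj (no bias)
mlp = 3 * hidden * inter                    # gate_proj, up_proj, down_proj
norms = 2 * hidden                          # two RMSNorms per layer

# Embeddings and lm_head are counted separately since tie_word_embeddings
# is false; the final model.norm adds one more `hidden`-sized vector.
total = layers * (attn + mlp + norms) + 2 * vocab * hidden + hidden
print(total, 4 * total)  # 1777088000 params -> 7108352000 bytes, matching the index
```
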
diff --git a/deepscaler-1.5b-8k/actor/global_step_1200/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1200/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1200/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
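
`generation_config.json` stores the sampling defaults (`do_sample=true`, `temperature=0.6`, `top_p=0.95`) that `generate()` picks up automatically, so callers do not need to repeat them. A minimal sketch of sampling from this checkpoint under those defaults (paths are from this diff; `torch` and `transformers` are assumed available, and the float32 shards need roughly 7 GB of memory):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_1200"

tok = AutoTokenizer.from_pretrained(ckpt)
# torch_dtype="auto" keeps the float32 dtype recorded in config.json.
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype="auto")

messages = [{"role": "user", "content": "Prove the sum of two even numbers is even."}]
inputs = tok.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")

# generate() reads do_sample/temperature/top_p from generation_config.json.
with torch.no_grad():
    out = model.generate(inputs, max_new_tokens=256)

print(tok.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```
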
diff --git a/deepscaler-1.5b-8k/actor/global_step_1200/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1200/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4d77391e7e475313094a68d328fb19e44477397c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1200/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9da44b5f5418200db230aa533bba5ca856cef3bef9a2fa3772534190dd977949
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1200/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1200/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a1b6d440eb39436525debf24fa939c45d0231b9b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1200/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9384028b53d036a1d897141b2ba0b42b4cffd040d1e9ac0553a16c812c8a164
+size 2111719976
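
Both shards above are stored as Git LFS pointers: three lines giving the spec version, the blob's SHA-256 `oid`, and its byte `size`. A hedged utility sketch for checking a downloaded blob against its pointer, using only the standard library (the file names in the usage comment are illustrative):

```python
import hashlib

def parse_lfs_pointer(path):
    """Parse the three-line 'key value' Git LFS pointer format into a dict."""
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def verify_blob(pointer_path, blob_path, chunk_size=1 << 20):
    """Check a blob's size and SHA-256 digest against its LFS pointer."""
    pointer = parse_lfs_pointer(pointer_path)
    expected = pointer["oid"].removeprefix("sha256:")
    digest, size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
            size += len(chunk)
    return size == int(pointer["size"]) and digest.hexdigest() == expected

# Illustrative usage against the shard above:
# verify_blob("pointer.txt", "model-00002-of-00002.safetensors")
```
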
diff --git a/deepscaler-1.5b-8k/actor/global_step_1200/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1200/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1200/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1200/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1200/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1200/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1200/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1200/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1200/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1200/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1200/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1200/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1220/config.json b/deepscaler-1.5b-8k/actor/global_step_1220/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1220/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1220/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1220/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1220/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1220/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1220/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2f283057995bc6ba8253f909ab63b15c97198235
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1220/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:643b712f902ea5e13172fe3d8bd34219a89265552d9381591addce55c7f26b31
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1220/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1220/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3b122babd3a2440fc3e650dd997a43273cfada33
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1220/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39b559edc092699ffa2ebf1a04b6e1b60deb97d9d27868d130c04fd4a3daf8f8
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1220/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1220/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1220/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
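
The `weight_map` above is what lets a loader open only the shard that holds a requested tensor instead of reading the full ~7.1 GB checkpoint. A minimal sketch of that lookup, assuming the `safetensors` and `torch` packages and a locally checked-out copy of this repo (the path is illustrative):

```python
import json
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_1220"  # illustrative local path

# The index maps every tensor name to the shard file that stores it.
with open(f"{ckpt}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.norm.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00002.safetensors"

# Open just that one shard and pull the single tensor.
with safe_open(f"{ckpt}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)
print(shard, tuple(tensor.shape))  # expected shape: (1536,)
```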
diff --git a/deepscaler-1.5b-8k/actor/global_step_1220/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1220/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1220/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1220/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1220/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1220/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
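
These three-line entries are Git LFS pointer files: the payload lives in LFS storage and is addressed by the SHA-256 `oid`. A quick integrity check after `git lfs pull`, assuming a local working copy (the path is illustrative):

```python
import hashlib

path = "deepscaler-1.5b-8k/actor/global_step_1220/tokenizer.json"  # illustrative
expected = "e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893"  # oid above

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == expected, "still an LFS pointer, or a corrupted download"
```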
diff --git a/deepscaler-1.5b-8k/actor/global_step_1220/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1220/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1220/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
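
The `chat_template` above is easiest to exercise through the tokenizer rather than by hand. A minimal sketch, assuming `transformers` is installed and the checkpoint directory is available locally (the path is illustrative):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1220")

prompt = tok.apply_chat_template(
    [{"role": "user", "content": "What is 7 * 8?"}],
    tokenize=False,
    add_generation_prompt=True,  # appends the "<|Assistant|><think>\n" generation prefix
)
print(prompt)
# <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>
```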
diff --git a/deepscaler-1.5b-8k/actor/global_step_1240/config.json b/deepscaler-1.5b-8k/actor/global_step_1240/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1240/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
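
This config fully determines the parameter count, and it reconciles exactly with the `total_size` recorded in the safetensors index (7,108,352,000 bytes at float32). A worked check in plain arithmetic, assuming the standard Qwen2 layout (biases on q/k/v only, untied lm_head):

```python
h, inter, layers, vocab = 1536, 8960, 28, 151936
head_dim = h // 12                 # 12 attention heads -> head_dim 128
kv_dim = 2 * head_dim              # num_key_value_heads = 2 -> 256

attn = (h * h + h) + 2 * (h * kv_dim + kv_dim) + h * h  # q (w+b), k/v (w+b), o (w)
mlp = 3 * h * inter                                     # gate, up, down projections
norms = 2 * h                                           # input + post-attention RMSNorm
per_layer = attn + mlp + norms                          # 46,797,824

total = layers * per_layer + 2 * vocab * h + h          # + embed, lm_head, final norm
print(total)      # 1,777,088,000 parameters (~1.78B)
print(total * 4)  # 7,108,352,000 bytes in float32 -- matches the index metadata
```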
diff --git a/deepscaler-1.5b-8k/actor/global_step_1240/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1240/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1240/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
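
End to end, each checkpoint directory loads as an ordinary `Qwen2ForCausalLM`, and `generate` picks up the sampling settings above (`do_sample=True`, `temperature=0.6`, `top_p=0.95`) automatically. A minimal sketch, assuming `transformers` and `torch` (the path is illustrative):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_1240"  # illustrative local path
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)

inputs = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 12 + 35."}],
    add_generation_prompt=True,
    return_tensors="pt",
)
out = model.generate(inputs, max_new_tokens=512)  # samples with the config above
print(tok.decode(out[0], skip_special_tokens=True))
```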
diff --git a/deepscaler-1.5b-8k/actor/global_step_1240/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1240/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..221aeddd56c9e0953864567582def5635994cae7
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1240/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:740b9707cb604de00893d2dda28fb47d71ff2d06c1cf9d916003612bc57bb7e3
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1240/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1240/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..555846e354c91b6c80ffa776bbb20a5cca7ccf79
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1240/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2813a3d77b4f12ee327e2d695db652c332ef0601f56da35745292d89705e44a
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1240/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1240/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1240/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
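The file above is the checkpoint's shard index: `metadata.total_size` counts the raw tensor bytes, and `weight_map` maps every tensor name to the shard file that stores it, so a loader can open only the shard holding the tensor it needs. A minimal sketch of that lookup, assuming the `safetensors` package (with `torch` for the `"pt"` framework) and a local copy of the checkpoint:

```python
# Resolve one tensor through model.safetensors.index.json, then load only it.
import json
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_1240"
with open(f"{ckpt}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.4.self_attn.q_proj.weight"
shard = index["weight_map"][name]            # "model-00001-of-00002.safetensors"
with safe_open(f"{ckpt}/{shard}", framework="pt") as shard_file:
    tensor = shard_file.get_tensor(name)     # reads this tensor, not the whole shard
print(name, tuple(tensor.shape), "<-", shard)
```

Per the map, everything up through layer 20 sits in shard 1, layer 21 straddles the two shards, and layer 22 onward plus `model.norm.weight` and `lm_head.weight` sit in shard 2.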
diff --git a/deepscaler-1.5b-8k/actor/global_step_1240/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1240/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1240/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
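This `special_tokens_map.json` pins the BOS and EOS strings and reuses the EOS token as PAD, the usual arrangement for a causal LM whose vocabulary has no dedicated padding token. A quick check, assuming `transformers` and a local checkout:

```python
# Verify the special-token wiring declared in special_tokens_map.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1240")
assert tok.pad_token == tok.eos_token          # PAD aliases <|end▁of▁sentence|>
print(repr(tok.bos_token), repr(tok.eos_token), tok.pad_token_id)
```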
diff --git a/deepscaler-1.5b-8k/actor/global_step_1240/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1240/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1240/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
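`tokenizer.json` is checked in as a Git LFS pointer rather than as the 11 MB file itself: three `key value` lines giving the spec version, the SHA-256 object id, and the byte size, which `git lfs checkout` swaps for the real blob. A sketch of reading one, using a hypothetical `parse_lfs_pointer` helper:

```python
# Parse the three key/value lines of a Git LFS pointer file.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("deepscaler-1.5b-8k/actor/global_step_1240/tokenizer.json")
print(ptr["oid"], ptr["size"])   # sha256:e20ddafc... 11422778
```

Note the oid is identical for every checkpoint's `tokenizer.json` in this diff: the tokenizer is frozen during training, so Git stores the blob once.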
diff --git a/deepscaler-1.5b-8k/actor/global_step_1240/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1240/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1240/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
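The long `chat_template` above is a Jinja template: it emits `<|User|>`/`<|Assistant|>` turn markers, drops everything up to the final `</think>` from earlier assistant messages so stale reasoning is not re-fed, and, when `add_generation_prompt` is set, ends the prompt with `<|Assistant|><think>\n` so the model begins inside a reasoning block. A minimal rendering sketch, assuming `transformers` and a local checkout:

```python
# Render a prompt with the checkpoint's chat template.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1240")
messages = [{"role": "user", "content": "What is 7 * 8?"}]
prompt = tok.apply_chat_template(messages, tokenize=False,
                                 add_generation_prompt=True)
print(prompt)
# <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>\n
```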
diff --git a/deepscaler-1.5b-8k/actor/global_step_1260/config.json b/deepscaler-1.5b-8k/actor/global_step_1260/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1260/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
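This `config.json` pins the distilled architecture: 28 Qwen2 layers at hidden size 1536, grouped-query attention with 12 query heads but only 2 key/value heads (head_dim 1536/12 = 128, so the `k_proj`/`v_proj` projections are 256-wide), and `torch_dtype: float32`, which is why the shards total about 7.1 GB: 7,108,352,000 bytes / 4 bytes per parameter is roughly 1.78B parameters, consistent with the "1.5B" model family. A sketch of that arithmetic via `AutoConfig`, assuming `transformers`:

```python
# Sanity-check the head geometry and the parameter count implied by float32 shards.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1260")
head_dim = cfg.hidden_size // cfg.num_attention_heads   # 1536 // 12 = 128
kv_width = cfg.num_key_value_heads * head_dim           # 2 * 128 = 256

total_size = 7_108_352_000                              # metadata.total_size
print(head_dim, kv_width, round(total_size / 4 / 1e9, 2))  # 128 256 1.78
```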
diff --git a/deepscaler-1.5b-8k/actor/global_step_1260/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1260/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1260/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
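`generation_config.json` bakes the sampling defaults into the checkpoint: `do_sample` with temperature 0.6 and top-p 0.95, the settings recommended for the R1 distills. `model.generate()` picks these up automatically unless overridden at call time; a minimal sketch, assuming `transformers` and `torch`:

```python
# The checkpoint's generation_config supplies the sampling defaults.
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_1260"
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path)
print(model.generation_config.temperature, model.generation_config.top_p)  # 0.6 0.95

ids = tok.apply_chat_template([{"role": "user", "content": "Hi"}],
                              add_generation_prompt=True, return_tensors="pt")
out = model.generate(ids, max_new_tokens=32)   # samples at T=0.6, top_p=0.95
print(tok.decode(out[0], skip_special_tokens=True))
```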
diff --git a/deepscaler-1.5b-8k/actor/global_step_1260/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1260/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..37d2466219ad6dbae163a25df33a4f8e51ca8d06
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1260/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:966dea6d440dddf996e6d07d68ea2e612f845d3d5770655d09a3589746d9795a
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1260/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1260/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c540274a33432e4e2b2ebc9c26bfce2337ca4f1d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1260/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7bf685f9790483c0302f651f47ee32737002a1de8670a42816fd68f7c240e59
+size 2111719976
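The two shard sizes do not sum exactly to the index's `total_size`: 4,996,670,464 + 2,111,719,976 = 7,108,390,440 bytes, which is 38,440 bytes more than 7,108,352,000, because each `.safetensors` file prepends its own 8-byte length word and JSON header on top of the raw tensor bytes the index counts. The arithmetic:

```python
# Shard bytes = tensor bytes (index total_size) + per-file safetensors headers.
shards = {
    "model-00001-of-00002.safetensors": 4_996_670_464,
    "model-00002-of-00002.safetensors": 2_111_719_976,
}
total_size = 7_108_352_000                    # metadata.total_size in the index
print(sum(shards.values()) - total_size)      # 38440 bytes of header overhead
```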
diff --git a/deepscaler-1.5b-8k/actor/global_step_1260/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1260/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1260/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1260/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1260/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1260/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1260/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1260/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1260/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1260/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1260/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1260/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1280/config.json b/deepscaler-1.5b-8k/actor/global_step_1280/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1280/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1280/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1280/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1280/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1280/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1280/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f80ba4f23ba961776b6aa9178559a113aaebc4f8
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1280/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:927428688fb66261619cad1f0584b948fd4a79c6021326943edd8d1a0e54831c
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1280/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1280/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e25593554c73390a918baec4fbc1c88e911d8dc0
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1280/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37124e2d124bef50f227e467243fd065738a573156dcd1629edcfa395b03b2ee
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1280/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1280/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1280/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1280/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1280/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1280/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1280/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1280/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1280/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1280/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1280/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1280/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1300/config.json b/deepscaler-1.5b-8k/actor/global_step_1300/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1300/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1300/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1300/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1300/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1300/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1300/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ae0d23cd6027a5fd49eeae27002053536234a784
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1300/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1444e66e4711391a3d4f869559ac452faa35aec431c47978fa7a86efca78ee8a
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1300/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1300/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1366946e5f44ddc6c74f49c60dfaea62f6a1eeb5
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1300/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab93ae79f6ce24e33bd97ca929bd0ea677b943fbeeecdc105965ee3dff8e44b6
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1300/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1300/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1300/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
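Each checkpoint directory in this diff ships a `model.safetensors.index.json` like the one closed above: `metadata.total_size` is the combined byte count of all shards, and `weight_map` maps every tensor name to the shard file that stores it. Since the configs in this diff save weights as float32, the size is easy to sanity-check: 7,108,352,000 bytes / 4 bytes per parameter ≈ 1.78B parameters, which matches the actual parameter count of the "1.5B"-named DeepSeek-R1-Distill-Qwen base. A minimal sketch of resolving one tensor through the index — the checkpoint path is whatever local directory the repo was cloned to, and `safetensors` is the standard loader rather than anything this repo pins:

```python
import json
from safetensors import safe_open  # pip install safetensors

ckpt = "deepscaler-1.5b-8k/actor/global_step_1300"  # any global_step_* dir has the same layout

with open(f"{ckpt}/model.safetensors.index.json") as f:
    index = json.load(f)

# weight_map resolves a tensor name to the shard that holds it.
name = "model.norm.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00002.safetensors"

with safe_open(f"{ckpt}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)

# metadata.total_size is in bytes; float32 weights are 4 bytes each.
print(index["metadata"]["total_size"] / 4)  # ~1.78e9 parameters
```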
diff --git a/deepscaler-1.5b-8k/actor/global_step_1300/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1300/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1300/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1300/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1300/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1300/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
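As the `@@ -0,0 +1,3 @@` hunks show, the large files in this diff (every `tokenizer.json` and `.safetensors` shard) are committed as Git LFS pointers: three lines giving the spec version, a `sha256` object ID, and the blob size in bytes, with the real content only arriving on an LFS fetch. A small sketch, assuming nothing beyond the pointer format shown above (the function name and paths are illustrative), that verifies a downloaded blob against its pointer:

```python
import hashlib
from pathlib import Path

def verify_lfs_blob(pointer_path: str, blob_path: str) -> bool:
    """Check a fetched blob against the version/oid/size lines of its LFS pointer."""
    fields = dict(
        line.split(" ", 1) for line in Path(pointer_path).read_text().splitlines() if line
    )
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    digest, size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return size == expected_size and digest.hexdigest() == expected_oid
```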
diff --git a/deepscaler-1.5b-8k/actor/global_step_1300/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1300/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1300/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
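The `chat_template` field above is a Jinja template that transformers executes at prompt-build time: it emits the BOS token and any system prompt, wraps turns in `<|User|>` / `<|Assistant|>`, drops everything up to `</think>` from earlier assistant turns so past reasoning is not re-fed, and, with `add_generation_prompt`, ends the prompt with `<|Assistant|><think>\n`. A minimal sketch of exercising it, assuming the checkpoint has been fetched to a local directory of this name:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1300")

messages = [{"role": "user", "content": "What is 2 + 2?"}]
# Renders roughly: "<|begin▁of▁sentence|><|User|>What is 2 + 2?<|Assistant|><think>\n"
prompt = tok.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)
```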
diff --git a/deepscaler-1.5b-8k/actor/global_step_1320/config.json b/deepscaler-1.5b-8k/actor/global_step_1320/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1320/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1320/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1320/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1320/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
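`generation_config.json` carries the decoding defaults recommended for the R1 distills — sampling on, temperature 0.6, top_p 0.95 — and transformers loads it automatically alongside the weights, so a plain `generate()` call already uses them. A sketch under that assumption (the local path and prompt are illustrative; the shards store float32 weights, so loading in bfloat16 simply halves memory):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_1320"
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)

messages = [{"role": "user", "content": "Prove that sqrt(2) is irrational."}]
inputs = tok.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")

# Samples with the defaults from generation_config.json
# (do_sample=True, temperature=0.6, top_p=0.95) unless overridden here.
out = model.generate(inputs, max_new_tokens=512)
print(tok.decode(out[0], skip_special_tokens=True))
```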
diff --git a/deepscaler-1.5b-8k/actor/global_step_1320/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1320/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..72120ef4697601e442183030d3037c162f0a850a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1320/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e3fb143d1c38db566352d5b3a61d15fe05cdb55cdbe85bf47d5296cdccd775a
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1320/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1320/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7ea092bb24b648f39ee1e23e27d45ec5a9cfc181
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1320/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65ee8a872639db2eee0171262b55ffb51b2f7927901e3e67cdb62ea4f1a35070
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1320/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1320/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1320/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1320/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1320/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1320/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1320/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1320/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1320/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1320/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1320/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1320/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1340/config.json b/deepscaler-1.5b-8k/actor/global_step_1340/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1340/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1340/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1340/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1340/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1340/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1340/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4e6ea0cbacdef104824a002a6538118e55cc9ee6
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1340/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c23a611b5f1a6c5b65cd2ff3813effdb1787c9bea238456a94be51e52559e9f
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1340/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1340/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..67400abe8d602283f1fa718b2edb362ab3474020
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1340/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2836a8cdcea2485bbe185ffcc0c6ee29d9da7fa1069e9c2eadf533d7f681ddee
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1340/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1340/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1340/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1340/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1340/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1340/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1340/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1340/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1340/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1340/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1340/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1340/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1360/config.json b/deepscaler-1.5b-8k/actor/global_step_1360/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1360/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1360/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1360/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1360/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1360/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1360/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2a35f2ce36ba3fbb1b9e83c150769f7b4aa5b91d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1360/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c9aad5c617409c88d6060fc9f824026ddcd40c8c9b664d3a126ad1906f82f9b
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1360/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1360/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..276fa20cf96f578ba07b4d29dbf6e0afed59acbb
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1360/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:881c3dcfa22958e8b6845488c66a0729310841a6db66ab2a4ffdee2996b41c47
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1360/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1360/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1360/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
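
The `weight_map` in the index file above is what lets loaders find a tensor's shard without scanning every `.safetensors` file. A minimal sketch of that lookup, assuming a local copy of this checkpoint and the `safetensors`/`torch` packages (the path is taken from this diff; the tensor choice is illustrative):

```python
import json
from safetensors import safe_open

ckpt_dir = "deepscaler-1.5b-8k/actor/global_step_1360"  # path from this diff

# The index maps each tensor name to the shard file that stores it.
with open(f"{ckpt_dir}/model.safetensors.index.json") as f:
    index = json.load(f)
shard = index["weight_map"]["model.norm.weight"]  # -> "model-00002-of-00002.safetensors"

# Open only that shard and read the one tensor we need.
with safe_open(f"{ckpt_dir}/{shard}", framework="pt") as f:
    tensor = f.get_tensor("model.norm.weight")
print(tensor.shape)  # torch.Size([1536]), matching hidden_size=1536
```

Note that `metadata.total_size` counts tensor bytes only; the shard files are slightly larger because each carries its own safetensors header.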
diff --git a/deepscaler-1.5b-8k/actor/global_step_1360/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1360/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1360/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1360/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1360/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1360/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
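
The three lines above are not the tokenizer itself but a Git LFS pointer: a `version` line naming the spec, an `oid` line with the SHA-256 of the real blob, and its `size` in bytes. A minimal sketch of parsing that format (plain Python, no LFS client required; the helper name is an assumption):

```python
def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; oid is "<algo>:<hex digest>".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "oid_algo": algo,
            "oid": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
size 11422778"""
info = parse_lfs_pointer(pointer)
print(info["size"])  # 11422778 bytes (~11 MB) for the actual tokenizer.json
```

`git lfs pull` (or `huggingface_hub`) replaces these pointers with the real files on checkout.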
diff --git a/deepscaler-1.5b-8k/actor/global_step_1360/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1360/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1360/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
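
The `chat_template` above is a Jinja template that wraps messages in the `<|User|>`/`<|Assistant|>` markers, strips any `<think>…</think>` reasoning from prior assistant turns, and opens a fresh `<think>` block when a generation prompt is requested. A minimal sketch of rendering it, assuming `transformers` is installed and this checkpoint is available locally (the path is taken from this diff):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1360")
messages = [{"role": "user", "content": "What is 2 + 2?"}]

# Render the template without tokenizing, appending the assistant prefix.
prompt = tok.apply_chat_template(messages, tokenize=False,
                                 add_generation_prompt=True)
# Should render roughly:
# "<|begin▁of▁sentence|><|User|>What is 2 + 2?<|Assistant|><think>\n"
print(prompt)
```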
diff --git a/deepscaler-1.5b-8k/actor/global_step_1380/config.json b/deepscaler-1.5b-8k/actor/global_step_1380/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1380/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
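
A quick way to sanity-check these hyperparameters is to load the config with `transformers` (a sketch assuming a local copy; the path is taken from this diff). With 12 query heads over only 2 KV heads, this is grouped-query attention with 6 query heads sharing each KV head:

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1380")
assert cfg.model_type == "qwen2"
# 28 layers, 12 attention heads grouped onto 2 KV heads, 1536-dim hidden state.
print(cfg.num_hidden_layers, cfg.num_attention_heads,
      cfg.num_key_value_heads, cfg.hidden_size)
```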
diff --git a/deepscaler-1.5b-8k/actor/global_step_1380/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1380/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1380/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
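
These defaults (sampling on, temperature 0.6, top_p 0.95) are picked up automatically by `model.generate` when loading the checkpoint directory; the sketch below passes them explicitly for clarity. Paths and prompt are illustrative assumptions:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_1380"  # path from this diff
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16)

inputs = tok("Prove that the sum of two even numbers is even.",
             return_tensors="pt")
out = model.generate(**inputs, do_sample=True, temperature=0.6, top_p=0.95,
                     max_new_tokens=256)
print(tok.decode(out[0], skip_special_tokens=True))
```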
diff --git a/deepscaler-1.5b-8k/actor/global_step_1380/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1380/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..182396dad040375c01ada8c0aeafdc89bdf1fbfd
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1380/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2519d031a1fad6776fd8f4bac5866922dda1e7f0b2f758d4b5a9d0aacc9b9c3e
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1380/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1380/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..72b3022bda46cdf28b186c509b4944da8437f695
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1380/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1301a2e5dccf37f744902aa0f6e3f90e6cf8b72464aee344dc65a1457acb527d
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1380/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1380/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1380/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1380/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1380/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1380/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1380/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1380/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1380/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1380/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1380/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1380/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_140/config.json b/deepscaler-1.5b-8k/actor/global_step_140/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_140/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_140/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_140/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_140/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_140/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_140/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c1504f382aa43b8d2c1b1ee201c6a984cb3c67e5
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_140/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94dc75aee8bb0e9691b37e2645193aa394ecfa75607ab84645479cfba31a0649
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_140/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_140/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d9f271911427bbca6bc856ac8d6e377f9a991e5f
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_140/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61b0bc4aec2df7304f7ffa87cd76764ede135cbbafa253347aec2269a4a01422
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_140/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_140/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_140/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
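The index file closed above is the standard Hugging Face sharded-checkpoint manifest: `weight_map` sends every parameter name to the shard file that stores it. Below is a minimal sketch of resolving tensors through such an index; the checkpoint path is illustrative, `safetensors` must be installed, and the LFS shard blobs must already be pulled locally.

```python
# Hedged sketch: materializing a state dict via model.safetensors.index.json.
import json
import os

from safetensors.torch import load_file

ckpt_dir = "deepscaler-1.5b-8k/actor/global_step_140"  # illustrative path
with open(os.path.join(ckpt_dir, "model.safetensors.index.json")) as f:
    index = json.load(f)

# weight_map: parameter name -> shard filename (e.g. model-00001-of-00002.safetensors).
weight_map = index["weight_map"]
shards = {}
state_dict = {}
for name, shard_file in weight_map.items():
    if shard_file not in shards:
        shards[shard_file] = load_file(os.path.join(ckpt_dir, shard_file))
    # KeyError here would mean the index disagrees with the shard contents.
    state_dict[name] = shards[shard_file][name]

print(f"loaded {len(state_dict)} tensors across {len(shards)} shards")
```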
diff --git a/deepscaler-1.5b-8k/actor/global_step_140/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_140/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_140/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_140/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_140/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_140/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
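The tokenizer.json above is checked in as a Git LFS pointer rather than the ~11 MB tokenizer itself: exactly the three `key value` lines shown. A small sketch for parsing and verifying such a pointer against a downloaded blob (assumes Python 3.9+ for `str.removeprefix`; the function names are illustrative):

```python
# Hedged sketch: reading a Git LFS pointer stub and checking the real object.
import hashlib


def parse_lfs_pointer(path: str) -> tuple[str, int]:
    """Return (sha256 hex digest, byte size) from a 3-line LFS pointer file."""
    fields = dict(line.split(" ", 1) for line in open(path).read().splitlines())
    return fields["oid"].removeprefix("sha256:"), int(fields["size"])


def verify_lfs_object(blob_path: str, oid: str, size: int) -> bool:
    """True if the downloaded blob matches the pointer's size and digest."""
    data = open(blob_path, "rb").read()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid
```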
diff --git a/deepscaler-1.5b-8k/actor/global_step_140/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_140/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_140/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
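The `chat_template` above is DeepSeek's R1-style Jinja template, so prompts can be built with the standard transformers API instead of by hand. A hedged sketch follows; the checkpoint path is illustrative and the tokenizer files must be materialized from LFS first.

```python
# Hedged sketch: rendering a prompt with the checkpoint's chat template.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_140")
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "What is 7 * 8?"}],
    tokenize=False,
    add_generation_prompt=True,  # appends the <|Assistant|><think> turn opener
)
# Roughly: <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>\n
print(prompt)
```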
diff --git a/deepscaler-1.5b-8k/actor/global_step_1400/config.json b/deepscaler-1.5b-8k/actor/global_step_1400/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1400/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
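One useful cross-check: the dimensions in this config pin down the parameter count exactly, and at `"torch_dtype": "float32"` (4 bytes each) the total should equal `total_size` in the shard index added below, 7,108,352,000 bytes. A short verification sketch using only numbers taken from the config fields:

```python
# Hedged sanity check: parameter count implied by config.json vs. the
# float32 byte count reported by model.safetensors.index.json.
h, inter, layers, vocab = 1536, 8960, 28, 151936
heads, kv_heads = 12, 2
head_dim = h // heads                       # 128

attn = h * h + h                            # q_proj weight + bias
attn += 2 * (h * kv_heads * head_dim + kv_heads * head_dim)  # k/v proj + biases
attn += h * h                               # o_proj (no bias)
mlp = 3 * h * inter                         # gate, up, down projections
norms = 2 * h                               # input + post-attention layernorms
per_layer = attn + mlp + norms

# tie_word_embeddings is false, so embed_tokens and lm_head are separate.
total = layers * per_layer + 2 * vocab * h + h  # + final model.norm
assert total == 7_108_352_000 // 4          # 1,777,088,000 parameters
```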
diff --git a/deepscaler-1.5b-8k/actor/global_step_1400/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1400/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1400/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
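generation_config.json carries the sampling defaults (`do_sample` with temperature 0.6 and top_p 0.95) that `model.generate` picks up automatically from the checkpoint directory. A minimal sketch, assuming the shards below have been pulled and there is enough RAM for the float32 weights:

```python
# Hedged sketch: sampling with the checkpoint's declared decoding settings.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_1400"  # illustrative path
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.float32)

messages = [{"role": "user", "content": "Compute 12 * 13."}]
inputs = tok.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
# temperature/top_p/do_sample come from generation_config.json automatically.
out = model.generate(inputs, max_new_tokens=128)
print(tok.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```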
diff --git a/deepscaler-1.5b-8k/actor/global_step_1400/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1400/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..18094e969847a9a60b87a8c6f4c3009e148a1e29
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1400/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ff9597576b391476072cc09aeb9136a46fa1db38d4cabb56638daee6d6ff49e
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1400/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1400/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ce71e4983a5a44d3ad8625cb96194a0f15031231
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1400/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f80b739fcd0d2f8eeb28727352dc948e88207a48cc9d56bb16cd649b0cfeadee
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1400/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1400/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1400/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
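Note the shard boundary in the weight_map just closed: layer 21's attention and gate/up projections sit in model-00001, while its down_proj and layernorms spill into model-00002, which is why both shard files appear under `model.layers.21.*`. A quick sketch that makes the split visible by grouping entries per layer:

```python
# Hedged sketch: find layers whose tensors straddle a shard boundary.
import json
from collections import defaultdict

index_path = (
    "deepscaler-1.5b-8k/actor/global_step_1400/model.safetensors.index.json"
)
with open(index_path) as f:
    weight_map = json.load(f)["weight_map"]

by_layer = defaultdict(set)
for name, shard in weight_map.items():
    if name.startswith("model.layers."):
        by_layer[int(name.split(".")[2])].add(shard)

split_layers = sorted(i for i, shards in by_layer.items() if len(shards) > 1)
print(split_layers)  # expected for this index: [21]
```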
diff --git a/deepscaler-1.5b-8k/actor/global_step_1400/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1400/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1400/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1400/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1400/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1400/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1400/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1400/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1400/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1420/config.json b/deepscaler-1.5b-8k/actor/global_step_1420/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1420/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1420/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1420/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1420/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1420/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1420/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..cdfc41b421c7d4d717ac44d67ef6bad7813a090b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1420/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bdb8cd55d9b47c66d8fb20d258112d40a0fa94221c88e92c27a7536bfa965a67
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1420/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1420/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6f3d27b3c130a0f9947b7f91eaa59052d3762408
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1420/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c9fc848d38edc813f6850b0f02f66f24857280d66265b3f13493393bb255ecf
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1420/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1420/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1420/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1420/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1420/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1420/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1420/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1420/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1420/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1420/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1420/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1420/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1440/config.json b/deepscaler-1.5b-8k/actor/global_step_1440/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1440/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1440/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1440/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1440/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1440/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1440/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c6269bc6006c65ed9bdf9cb308dda2f6d6d3987d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1440/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6169359fade886825918270950cd0dcfa9574381003370634656e711fec4e6ce
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1440/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1440/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..06490aaf1d891592bef209f6e33b1f34afb713ec
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1440/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:917c180268eae55b6678c75512679a373322f5fc2c7bdf8bc4203be7a39ae903
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1440/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1440/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1440/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
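
The `weight_map` above is the standard safetensors sharding index: each parameter name resolves to the shard file that stores it. Note that layer 21 straddles the shard boundary (attention and gate/up projections in shard 1, layernorms and `down_proj` in shard 2), which is exactly why the index is needed. A minimal sketch of resolving one tensor through it, assuming the `safetensors` package and shards materialized locally via git-lfs:

```python
# Minimal sketch, assuming git-lfs has fetched the real shard files.
import json, os
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_1440"
with open(os.path.join(ckpt, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "model.layers.21.mlp.gate_proj.weight"   # this one lives in shard 1 of 2
shard = index["weight_map"][name]               # -> "model-00001-of-00002.safetensors"
with safe_open(os.path.join(ckpt, shard), framework="pt") as f:
    tensor = f.get_tensor(name)                 # torch.Size([8960, 1536]) per config.json
```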
diff --git a/deepscaler-1.5b-8k/actor/global_step_1440/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1440/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1440/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1440/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1440/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1440/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1440/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1440/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1440/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
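
This tokenizer_config.json carries the DeepSeek-R1 chat template: earlier assistant turns have everything up to the closing `</think>` stripped before re-serialization, and `add_generation_prompt` opens a fresh `<|Assistant|><think>` block. A minimal sketch of rendering a prompt with it, assuming a local checkout where git-lfs has materialized the real `tokenizer.json`:

```python
# Minimal sketch, assuming the checkpoint directory is available locally.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1440")
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 3^4."}],
    tokenize=False,
    add_generation_prompt=True,   # appends '<|Assistant|><think>\n' per the template
)
print(prompt)
# roughly: <|begin▁of▁sentence|><|User|>Compute 3^4.<|Assistant|><think>
```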
diff --git a/deepscaler-1.5b-8k/actor/global_step_1460/config.json b/deepscaler-1.5b-8k/actor/global_step_1460/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1460/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1460/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1460/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1460/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
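
config.json declares a 28-layer Qwen2ForCausalLM (hidden size 1536, GQA with 12 query / 2 KV heads) saved in float32, and generation_config.json pins the sampling settings used by the DeepSeek-R1 distills (temperature 0.6, top_p 0.95). A sketch of loading this step and generating with those defaults; the bfloat16 downcast is an assumption to halve memory, not something the checkpoint requires:

```python
# Minimal sketch, assuming the fp32 shards are materialized via git-lfs.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_1460"
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)

inputs = tok.apply_chat_template(
    [{"role": "user", "content": "What is 7 * 8?"}],
    add_generation_prompt=True, return_tensors="pt",
)
out = model.generate(inputs, max_new_tokens=256, do_sample=True,
                     temperature=0.6, top_p=0.95)  # matches generation_config.json
print(tok.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```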
diff --git a/deepscaler-1.5b-8k/actor/global_step_1460/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1460/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..14fe49758c08343c5f914b39159ecd6749556490
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1460/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:837b4066f9109b255c1c997beb0375b0fb3a9eb2b44eaef2d15a714862e54aec
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1460/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1460/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..994f249efc411d896c2111fc09c64ffe1bfee8b3
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1460/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d917823e9d7e4cf52291f95e5c1bb13000dbf7e2f15b5ca991ea30a598f61db3
+size 2111719976
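
The safetensors entries above are git-lfs pointer files (spec v1), not the weights themselves: three lines giving the spec URL, the sha256 oid, and the byte size of the real blob (about 5.0 GB and 2.1 GB for the two shards here). A small standard-library-only sketch that checks a fetched blob against its pointer:

```python
# Minimal sketch: validate a blob against its git-lfs pointer file.
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    fields = dict(line.split(" ", 1)
                  for line in Path(pointer_path).read_text().splitlines())
    want_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    want_size = int(fields["size"])
    h, n = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream: blobs are GB-scale
            h.update(chunk)
            n += len(chunk)
    return n == want_size and h.hexdigest() == want_oid
```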
diff --git a/deepscaler-1.5b-8k/actor/global_step_1460/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1460/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1460/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1460/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1460/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1460/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1460/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1460/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1460/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
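Note: the three-line `tokenizer.json` above is a Git LFS pointer, not the tokenizer itself; it records only the spec version, the SHA-256 object id, and the byte size (about 11.4 MB), and `git lfs pull` replaces it with the real blob. As a minimal sketch (hypothetical paths, standard library only), a fetched blob can be checked against its pointer:

```python
import hashlib
from pathlib import Path

def verify_lfs_object(pointer_path: str, blob_path: str) -> bool:
    """Check a fetched file against its Git LFS pointer (oid sha256 + size)."""
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if " " in line
    )
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with blob.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Hypothetical paths: the pointer as committed vs. the blob after `git lfs pull`.
print(verify_lfs_object("tokenizer.json.pointer", "tokenizer.json"))
```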
diff --git a/deepscaler-1.5b-8k/actor/global_step_1460/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1460/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1460/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
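The tokenizer config above wires up DeepSeek-R1's chat format: `add_bos_token` prepends `<|begin▁of▁sentence|>`, turns are delimited by `<|User|>`/`<|Assistant|>`, any `</think>` prefix is stripped from earlier assistant turns, and the generation prompt ends with `<|Assistant|><think>\n`. A minimal sketch of rendering a prompt with it, assuming `transformers` is installed and the LFS blobs have been pulled:

```python
from transformers import AutoTokenizer

# Local checkpoint directory from this commit (LFS blobs must be pulled first).
ckpt = "deepscaler-1.5b-8k/actor/global_step_1460"
tok = AutoTokenizer.from_pretrained(ckpt)

messages = [{"role": "user", "content": "What is 7 * 8?"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Expected shape: <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>\n
```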
diff --git a/deepscaler-1.5b-8k/actor/global_step_1480/config.json b/deepscaler-1.5b-8k/actor/global_step_1480/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1480/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1480/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1480/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1480/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
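`generation_config.json` sets the sampling defaults that `model.generate` picks up automatically: `do_sample=true` with temperature 0.6 and top-p 0.95, the values recommended for the R1 distills. A hedged usage sketch (loading in bfloat16 is a choice here, not part of the checkpoint, which is stored in float32 per `config.json`):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_1480"  # local path from this commit
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(
    ckpt, torch_dtype=torch.bfloat16, device_map="auto"
)

# generation_config.json is read automatically, so do_sample=True,
# temperature=0.6, and top_p=0.95 apply without being passed explicitly.
input_ids = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 12 + 35."}],
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)
out = model.generate(input_ids, max_new_tokens=256)
print(tok.decode(out[0][input_ids.shape[-1]:], skip_special_tokens=True))
```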
diff --git a/deepscaler-1.5b-8k/actor/global_step_1480/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1480/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8a2d0dbeb5e2c8f60eac5e6b972671833d517788
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1480/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76e5030a1bfc5bf60db6300edb7e3008d3169a9debd7c43ecd1bd82198a0aacc
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1480/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1480/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ea7c85aaee495ff7cc6587cefba5fdc82be31ea9
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1480/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a71f75d5765ffb4571796e2c0c6b85567bd567fcc843387c4bfc4a8b0b050a9
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1480/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1480/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1480/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
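A quick consistency check on the index above: `metadata.total_size` is 7,108,352,000 bytes, which at the config's `torch_dtype` of float32 (4 bytes per weight) is exactly 1,777,088,000 parameters, matching what `config.json` implies for this architecture (28 Qwen2 layers, hidden size 1536, GQA with 12 query and 2 KV heads, untied embeddings over a 151,936-token vocabulary). Layer 21 straddling the two shards is expected: safetensors sharding splits on size, not on layer boundaries. A small sketch of the arithmetic:

```python
# Recompute the parameter count implied by config.json and compare it with
# metadata.total_size / 4 (float32 weights, 4 bytes each).
hidden, inter, layers = 1536, 8960, 28
heads, kv_heads, vocab = 12, 2, 151936
head_dim = hidden // heads       # 128
kv_dim = kv_heads * head_dim     # 256 (grouped-query attention)

attn = (hidden * hidden          # q_proj.weight
        + 2 * hidden * kv_dim    # k_proj / v_proj weights
        + hidden * hidden        # o_proj.weight
        + hidden + 2 * kv_dim)   # q/k/v biases
mlp = 3 * hidden * inter         # gate_proj, up_proj, down_proj
norms = 2 * hidden               # input + post-attention layernorms

total = layers * (attn + mlp + norms) + 2 * vocab * hidden + hidden
assert total == 7_108_352_000 // 4 == 1_777_088_000
print(f"{total:,} parameters (~1.78B)")
```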
diff --git a/deepscaler-1.5b-8k/actor/global_step_1480/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1480/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1480/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1480/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1480/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1480/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1480/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1480/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1480/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1500/config.json b/deepscaler-1.5b-8k/actor/global_step_1500/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1500/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1500/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1500/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1500/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1500/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1500/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ba7cf6fded2ba2b36eaddf8bee7f8d78ddc80dbe
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1500/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a0a8477c2aeca3c7a61b5059832add7bbe5caffe5c4461a52abfd9a0509df64
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1500/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1500/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..51c359e45c62f7fff4ff2dbf182739c580e8b96c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1500/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0723df84dff917c995829363bb2415a6efea7adf8f71b701e4caa363e2ec3e68
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1500/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1500/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1500/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1500/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1500/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1500/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1500/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1500/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1500/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1500/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1500/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1500/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
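Note: the chat_template above renders <|User|>/<|Assistant|> turns, strips any <think>...</think> reasoning prefix from earlier assistant messages, and (with add_generation_prompt) ends the prompt with <|Assistant|><think>\n. A minimal sketch of rendering a prompt with it via transformers; the local checkpoint path is illustrative and assumes the files above are checked out:

    # Sketch only: render a prompt using this checkpoint's chat template.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1500")
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is 12 * 7?"},
    ]
    # tokenize=False returns the raw prompt string instead of token ids.
    prompt = tok.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
    print(prompt)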
diff --git a/deepscaler-1.5b-8k/actor/global_step_1520/config.json b/deepscaler-1.5b-8k/actor/global_step_1520/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1520/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1520/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1520/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1520/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
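The generation_config.json above pins the sampling defaults (do_sample=True, temperature=0.6, top_p=0.95) that transformers picks up automatically when generating from this directory. A minimal end-to-end sketch, assuming the step-1520 directory is checked out with git-lfs so the *.safetensors payloads below are real weights rather than pointer files:

    # Sketch only: load the checkpoint and sample with its own generation defaults.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    path = "deepscaler-1.5b-8k/actor/global_step_1520"
    tok = AutoTokenizer.from_pretrained(path)
    # The weights are stored in float32 (see config.json); bfloat16 here is an
    # optional memory-saving cast, not something the checkpoint mandates.
    model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16, device_map="auto")

    inputs = tok.apply_chat_template(
        [{"role": "user", "content": "Prove that the sum of two even numbers is even."}],
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)
    # do_sample / temperature / top_p are read from generation_config.json.
    out = model.generate(inputs, max_new_tokens=512)
    print(tok.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))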
diff --git a/deepscaler-1.5b-8k/actor/global_step_1520/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1520/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a4dcba0fe601b46152a4903407b5a8d4fa84d9f4
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1520/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5860f8da8b2a792093a934666f2017be8d7117ea45c886728b2933ba15b57f7a
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1520/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1520/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0ad65ef27ee9067925580f5e294b85afdf65fb04
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1520/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:488bb39b1a7a5689925632c3a6c3de2f3736d75332a86b83f21a9b77ac12538f
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1520/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1520/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1520/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
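The index above is what lets a loader open only the shard that holds a given parameter: metadata.total_size records the combined tensor bytes (~7.1 GB, matching the two shard file sizes up to safetensors header overhead), and weight_map sends everything through layer 20, plus layer 21's attention and gate/up projections, to shard 1, with layer 21's down_proj and layernorms and all of layers 22-27 in shard 2. A minimal lookup sketch (illustrative paths, not part of the checkpoint):

    # Sketch only: resolve one tensor through model.safetensors.index.json.
    import json, os
    from safetensors import safe_open

    ckpt = "deepscaler-1.5b-8k/actor/global_step_1520"
    with open(os.path.join(ckpt, "model.safetensors.index.json")) as f:
        index = json.load(f)

    name = "model.layers.21.mlp.down_proj.weight"   # layer 21 straddles the shard boundary
    shard = index["weight_map"][name]               # -> "model-00002-of-00002.safetensors"
    with safe_open(os.path.join(ckpt, shard), framework="pt") as f:
        tensor = f.get_tensor(name)                 # reads just this tensor, not the whole shard
    print(shard, tuple(tensor.shape))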
diff --git a/deepscaler-1.5b-8k/actor/global_step_1520/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1520/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1520/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1520/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1520/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1520/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1520/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1520/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1520/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1540/config.json b/deepscaler-1.5b-8k/actor/global_step_1540/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1540/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1540/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1540/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1540/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1540/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1540/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..db4b06c39ff8cfee6cbd44b7e2144890cdd64f39
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1540/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4d717d2db91e398bc064d5967e64cf324e3068b7a0482fc2302957f66af8464
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1540/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1540/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7670a65c26c3e143782531bf0ebba7e9a1d9caf3
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1540/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:560c8d386b6de8627f92d5904e38a73c6884af42773281adc4c1e92bc81edc23
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1540/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1540/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1540/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1540/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1540/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1540/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1540/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1540/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1540/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1540/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1540/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1540/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1560/config.json b/deepscaler-1.5b-8k/actor/global_step_1560/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1560/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1560/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1560/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1560/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1560/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1560/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..39a0c672e382320483a1073d8d3205755ef99b73
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1560/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80608676e2db3ddd0533db1b37418efb91e1e2f207c71563d66cd9f624f80976
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1560/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1560/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..33820aed6416b48dd753b2296f1c0e48ed0cdcd8
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1560/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4845e1ecab63204e4bd4793b63fc78eeb2b5678e49191dc070792573db2bb5d2
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1560/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1560/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1560/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1560/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1560/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1560/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1560/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1560/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1560/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1560/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1560/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1560/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1580/config.json b/deepscaler-1.5b-8k/actor/global_step_1580/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1580/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1580/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1580/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1580/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1580/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1580/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..44c57b86a348e2f03733e01142d3570078a348b3
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1580/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:725268edc2389d69a024e09c7a846d89aefff532da52c041fce9043409c79f89
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1580/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1580/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f012a4223d66bf1cb4a20954d9823555996cf006
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1580/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc096735a096bf49a757e9d32819ebe560a86119f8c1a28769d86b0289789dae
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1580/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1580/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1580/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1580/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1580/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1580/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1580/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1580/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1580/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1580/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1580/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1580/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
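
Note: token ids 151648/151649 above are the `<think>`/`</think>` reasoning delimiters and 151657/151658 are `<tool_call>`/`</tool_call>`; the literal tags were lost in extraction and are restored here from the DeepSeek-R1-Distill-Qwen tokenizer. The `</think>` split in `chat_template` drops any earlier reasoning block from assistant turns when a conversation is re-serialized. A minimal sketch of exercising the template via the standard `transformers` API (the checkpoint path is a hypothetical local one; any step directory works, since every step ships the same tokenizer files):

```python
from transformers import AutoTokenizer

# Hypothetical local path; tokenizer files are identical across global_step_* dirs.
tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1580")

messages = [{"role": "user", "content": "What is 17 * 24?"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# With the template as reconstructed above, this yields:
# '<|begin▁of▁sentence|><|User|>What is 17 * 24?<|Assistant|><think>\n'
print(prompt)
```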
diff --git a/deepscaler-1.5b-8k/actor/global_step_160/config.json b/deepscaler-1.5b-8k/actor/global_step_160/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_160/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_160/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_160/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_160/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
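
Note: each `global_step_*` directory is a complete, self-contained Hugging Face checkpoint (config, generation config, sharded safetensors plus index, and tokenizer files), so any step can be loaded directly. A minimal sketch, assuming `transformers` is installed and the LFS blobs for this directory have been fetched:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_160"  # hypothetical local path

tok = AutoTokenizer.from_pretrained(ckpt)
# from_pretrained resolves the two weight shards via model.safetensors.index.json.
model = AutoModelForCausalLM.from_pretrained(ckpt)

# generation_config.json ships do_sample=True, temperature=0.6, top_p=0.95,
# and generate() picks those defaults up automatically.
inputs = tok("What is 17 * 24?", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=64)
print(tok.decode(out[0], skip_special_tokens=True))
```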
diff --git a/deepscaler-1.5b-8k/actor/global_step_160/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_160/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8352b8d752b9af73521a9106f6728f4b6de4e652
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_160/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23530b0478a752b014f39081030a76ca358ff3c696f7dcbb04ec0d31ee612b92
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_160/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_160/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6f9f2871cefe1ba89dd7c0c13d31a516b90168bb
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_160/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b410a3e4b219e9dfb375b655138f72fa562ef36060ec9eb01ee60e96ba4385cb
+size 2111719976
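
Note: the two `.safetensors` entries above are git-lfs pointer files rather than the weights themselves; each records only the `oid sha256:` and `size` of the real blob, which `git lfs pull` fetches. A small standard-library sketch of checking a fetched blob against its pointer:

```python
import hashlib

def matches_lfs_pointer(path: str, oid: str, size: int) -> bool:
    """True if the file at `path` has the given sha256 digest and byte size."""
    h = hashlib.sha256()
    n = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            h.update(chunk)
            n += len(chunk)
    return n == size and h.hexdigest() == oid

# Values copied from the pointer above for model-00002-of-00002.safetensors.
print(matches_lfs_pointer(
    "deepscaler-1.5b-8k/actor/global_step_160/model-00002-of-00002.safetensors",
    "b410a3e4b219e9dfb375b655138f72fa562ef36060ec9eb01ee60e96ba4385cb",
    2111719976,
))
```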
diff --git a/deepscaler-1.5b-8k/actor/global_step_160/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_160/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_160/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
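
Note: `metadata.total_size` is the summed tensor payload in bytes. At `torch_dtype: float32` (4 bytes per parameter) the 7,108,352,000 bytes above correspond to 1,777,088,000 parameters, the expected footprint of this "1.5B"-class model with untied embeddings; the two shard files are slightly larger than the payload only because each carries a safetensors header. A quick consistency check:

```python
total_size = 7_108_352_000                   # metadata.total_size from the index
shard_bytes = 4_996_670_464 + 2_111_719_976  # sizes from the two LFS pointers

print(total_size // 4)           # 1777088000 parameters at 4 bytes each (float32)
print(shard_bytes - total_size)  # 38440 bytes: the two safetensors headers
```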
diff --git a/deepscaler-1.5b-8k/actor/global_step_160/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_160/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_160/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_160/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_160/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_160/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_160/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_160/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_160/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1600/config.json b/deepscaler-1.5b-8k/actor/global_step_1600/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1600/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1600/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1600/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1600/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1600/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1600/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5c42cd2fe78cf84775795944fee711e7ff4840b2
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1600/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb0dcdbb9344d89f6c0d540223f7df9266098f1ab39187350827caffe33f6f15
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1600/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1600/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..24aad0149b33b12c7b8d1ecc0fe6c22947623cb4
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1600/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cedbef48c7b3ea14c4b0c801f9eb9d9848604f451d34352ef427efc0af35e957
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1600/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1600/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1600/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1600/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1600/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1600/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1600/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1600/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1600/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1600/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1600/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1600/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1620/config.json b/deepscaler-1.5b-8k/actor/global_step_1620/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1620/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1620/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1620/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1620/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1620/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1620/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0d0c65945083dbe59a2166c0aa8958759e34bdbd
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1620/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:057ed4cfe81871e103a25cd3cd87505334f35fe69cf9b312333942125fff7549
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1620/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1620/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e1cf989a9c9de480bb5b7424a1b7e10d60c61783
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1620/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:024d8f088ae6678d28cfbf1262b3fa95c4944801376680fe603c2e5eea13cf0c
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1620/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1620/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1620/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1620/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1620/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1620/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1620/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1620/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1620/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1620/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1620/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1620/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
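
Note: the chat_template above is a Jinja template that transformers renders via apply_chat_template. Two details matter for this reasoning model: earlier assistant turns are split on '</think>' so only the final answer is re-serialized into the context, and with add_generation_prompt the prompt ends in '<|Assistant|><think>\n' so decoding resumes inside a fresh reasoning block. A hedged sketch, assuming the local checkpoint path:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1620")
    messages = [
        {"role": "user", "content": "What is 2 + 2?"},
        {"role": "assistant", "content": "<think>trivial</think>2 + 2 = 4"},
        {"role": "user", "content": "And 3 + 3?"},
    ]
    prompt = tok.apply_chat_template(messages, tokenize=False,
                                     add_generation_prompt=True)
    # The earlier assistant turn is reduced to "2 + 2 = 4"; the prompt ends
    # with '<|Assistant|><think>\n' to start a new reasoning block.
    print(prompt)
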
diff --git a/deepscaler-1.5b-8k/actor/global_step_1640/config.json b/deepscaler-1.5b-8k/actor/global_step_1640/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1640/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
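
Note: the config above pins the parameter count exactly, which explains the shard sizes that follow: 1,777,088,000 weights at 4 bytes each (torch_dtype is float32) is 7,108,352,000 bytes, the precise total_size recorded in model.safetensors.index.json below. The arithmetic, worked from the config fields (my own check, not repo content):

    # Parameter count implied by the config (untied embeddings, GQA with
    # 2 KV heads, SwiGLU MLP). Matches the safetensors index exactly.
    h, inter, layers, vocab = 1536, 8960, 28, 151936
    head_dim = h // 12            # 128
    kv = 2 * head_dim             # 256-dim K/V projections

    attn = (h * h + h) + 2 * (h * kv + kv) + h * h   # q (+bias), k/v (+bias), o
    mlp = 3 * h * inter                              # gate, up, down
    per_layer = attn + mlp + 2 * h                   # plus two RMSNorm weights
    total = layers * per_layer + 2 * vocab * h + h   # embed, lm_head, final norm

    assert total == 1_777_088_000
    assert total * 4 == 7_108_352_000                # float32 bytes == total_size
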
diff --git a/deepscaler-1.5b-8k/actor/global_step_1640/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1640/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1640/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
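
Note: generation_config.json supplies the default decoding settings for model.generate(), so sampling at temperature 0.6 / top_p 0.95 happens without extra arguments. A minimal loading sketch (the local path and the bfloat16 cast are assumptions; the shards themselves are stored in float32):

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    ckpt = "deepscaler-1.5b-8k/actor/global_step_1640"
    tok = AutoTokenizer.from_pretrained(ckpt)
    model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)

    inputs = tok("Prove that the square root of 2 is irrational.",
                 return_tensors="pt")
    out = model.generate(**inputs, max_new_tokens=256)  # sampling defaults apply
    print(tok.decode(out[0], skip_special_tokens=True))
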
diff --git a/deepscaler-1.5b-8k/actor/global_step_1640/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1640/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..97e5acec3559d2bc502e92a5fa66984fc0cf6c4c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1640/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16912d6bfd9f8f313169ff66fa2c8bb274191d1273f317dab9ccdbd713c459db
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1640/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1640/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..cd27d4e28acb07aed8743003dbd5714ed2cc44fe
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1640/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dae53a1a60537d010a6075b6eb6e1cb65d8e909e14ff8faace83c0e4316880ca
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1640/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1640/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1640/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
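
Note: the weight_map above routes every tensor name to the shard that stores it; layer 21 straddles the boundary (attention weights plus gate_proj/up_proj in shard 1, down_proj and both layernorms in shard 2). The two shard files total 7,108,390,440 bytes, i.e. total_size plus 38,440 bytes of safetensors headers, which the index does not count. A hedged sketch of resolving one tensor by hand, the same routing transformers performs when loading sharded weights:

    import json
    from safetensors import safe_open

    ckpt = "deepscaler-1.5b-8k/actor/global_step_1640"
    index = json.load(open(f"{ckpt}/model.safetensors.index.json"))

    name = "model.layers.21.mlp.down_proj.weight"   # lives in shard 2 of 2
    shard = index["weight_map"][name]
    with safe_open(f"{ckpt}/{shard}", framework="pt") as f:
        tensor = f.get_tensor(name)                 # shape (1536, 8960), float32
    print(shard, tuple(tensor.shape))
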
diff --git a/deepscaler-1.5b-8k/actor/global_step_1640/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1640/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1640/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1640/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1640/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1640/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1640/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1640/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1640/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1660/config.json b/deepscaler-1.5b-8k/actor/global_step_1660/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1660/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1660/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1660/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1660/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1660/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1660/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f90ea3c4c6c857ff03a9c8f54fac424066d3fe73
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1660/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e4fc36e410143ba06aa07581187c1e5de656c9878c8d9156daec26abd03f274
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1660/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1660/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..520609d0865fe26ec1c8db6a95820d7d9962148e
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1660/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52357e5ab2ea30d3f778b8db292fbc03a3f31556e589eed454a920fda5600e12
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1660/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1660/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1660/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
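
The index file that ends above maps every tensor name to the shard that stores it, so a loader only opens the shard a given tensor lives in. A minimal sketch of that lookup, assuming the standard `safetensors` Python API and that `git lfs pull` has replaced the pointer files with real shards (the local path is illustrative):

```python
# Minimal sketch: route a tensor lookup through model.safetensors.index.json.
# Assumes `pip install safetensors torch`; the checkpoint path is illustrative.
import json
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_1660"

with open(f"{ckpt}/model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]          # tensor name -> shard file name
shard = weight_map["model.norm.weight"]   # "model-00002-of-00002.safetensors"

# Open only the shard that holds the tensor; safetensors reads lazily,
# so the other ~5 GB shard is never touched.
with safe_open(f"{ckpt}/{shard}", framework="pt") as st:
    w = st.get_tensor("model.norm.weight")

print(w.shape)  # torch.Size([1536]), matching hidden_size in config.json
```
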
diff --git a/deepscaler-1.5b-8k/actor/global_step_1660/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1660/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1660/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1660/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1660/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1660/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
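
Like the model shards, each `tokenizer.json` is stored as a Git LFS pointer: three `key value` lines giving the spec version, a sha256 object id, and the byte size of the real file. A small sketch of parsing a pointer and verifying a resolved blob against it, using the pointer recorded above (`tokenizer.json` stands in for the real 11.4 MB file obtained via `git lfs pull`):

```python
# Minimal sketch: parse a Git LFS pointer and verify a resolved blob against it.
import hashlib

def parse_lfs_pointer(text: str) -> dict:
    # Pointer files are "key value" lines: version, oid sha256:<hex>, size.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"algo": algo, "digest": digest, "size": int(fields["size"])}

ptr = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893\n"
    "size 11422778\n"
)

with open("tokenizer.json", "rb") as f:  # the resolved file, not the pointer
    blob = f.read()

assert len(blob) == ptr["size"]
assert hashlib.sha256(blob).hexdigest() == ptr["digest"]
```
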
diff --git a/deepscaler-1.5b-8k/actor/global_step_1660/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1660/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1660/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
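
The `tokenizer_config.json` that ends above carries the DeepSeek-R1 chat template. Two behaviors are worth noting: earlier assistant turns are replayed with everything up to the last `</think>` stripped, and with `add_generation_prompt` the render ends with the assistant generation prompt so the model resumes in reasoning mode. A minimal sketch of rendering a prompt with `transformers`, assuming a local checkpoint directory (path and messages are illustrative):

```python
# Minimal sketch: render a chat prompt through the checkpoint's template.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1660")

messages = [
    {"role": "user", "content": "What is 7 * 8?"},
    {"role": "assistant", "content": "<think>7 * 8 = 56.</think>It is 56."},
    {"role": "user", "content": "And 7 * 9?"},
]

# The template keeps only the text after the last </think> from earlier
# assistant turns, so the replayed context contains "It is 56." without
# the reasoning trace.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```
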
diff --git a/deepscaler-1.5b-8k/actor/global_step_1680/config.json b/deepscaler-1.5b-8k/actor/global_step_1680/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1680/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
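
The config above fully determines the parameter count: 28 Qwen2 layers with grouped-query attention (12 query heads, 2 key/value heads, head dim 128), an 8960-wide SwiGLU MLP, and untied input/output embeddings over a 151,936-token vocabulary. A worked check that these dimensions reproduce the `total_size` recorded in the safetensors index at float32 (4 bytes per parameter):

```python
# Worked check (plain arithmetic, no external deps): parameters implied by
# config.json times 4 bytes/param (torch_dtype=float32) should equal the
# "total_size" in model.safetensors.index.json.
h, inter, layers, vocab = 1536, 8960, 28, 151936
heads, kv_heads = 12, 2
head_dim = h // heads              # 128
kv_dim = kv_heads * head_dim       # 256 (grouped-query attention)

attn = h * h + h                   # q_proj weight + bias
attn += 2 * (h * kv_dim + kv_dim)  # k_proj and v_proj (weight + bias each)
attn += h * h                      # o_proj (no bias)
mlp = 3 * h * inter                # gate_proj, up_proj, down_proj
norms = 2 * h                      # input + post-attention RMSNorm

per_layer = attn + mlp + norms
total = layers * per_layer + 2 * vocab * h + h  # + embed_tokens, lm_head, final norm
assert total == 1_777_088_000                   # ~1.78B parameters
assert total * 4 == 7_108_352_000               # matches the index metadata exactly
```
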
diff --git a/deepscaler-1.5b-8k/actor/global_step_1680/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1680/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1680/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
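
`generation_config.json` pins the default sampling settings (nucleus sampling with temperature 0.6 and top-p 0.95); `generate` picks them up automatically from the checkpoint directory. A minimal usage sketch, assuming `transformers` and `torch` are installed and the checkpoint is local (path, prompt, and `max_new_tokens` are illustrative):

```python
# Minimal sketch: the defaults above are read from the checkpoint directory
# by `generate`; they are also passed explicitly here for clarity.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_1680"
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float32)

input_ids = tok.apply_chat_template(
    [{"role": "user", "content": "Prove that the square root of 2 is irrational."}],
    add_generation_prompt=True,
    return_tensors="pt",
)
out = model.generate(input_ids, do_sample=True, temperature=0.6, top_p=0.95,
                     max_new_tokens=512)
print(tok.decode(out[0][input_ids.shape[-1]:], skip_special_tokens=True))
```
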
diff --git a/deepscaler-1.5b-8k/actor/global_step_1680/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1680/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..de050d988aefd9fe51a615d9617d13128dce2cfa
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1680/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a9c1bd4031815f9938f6b5fc5873e9992368a672ea86056b9c30a12bcbbc63b
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1680/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1680/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..22544b6de4978ffcd6428457fc6637fc94c89190
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1680/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9261a7e2033d8b968624e53002905ac46ab716c820da9b7e97c5593dcdaa6c3
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1680/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1680/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1680/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
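
A quick consistency check on this checkpoint: the byte sizes recorded in the two shard LFS pointers above sum to slightly more than the index's `total_size`, the difference being the small JSON header each safetensors file carries in addition to raw tensor bytes.

```python
# Sizes come straight from the LFS pointers and index metadata above.
shard_bytes = 4_996_670_464 + 2_111_719_976  # model-00001 + model-00002
total_size = 7_108_352_000                   # raw tensor bytes per the index
print(shard_bytes - total_size)              # 38440: header overhead of the two shards
```
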
diff --git a/deepscaler-1.5b-8k/actor/global_step_1680/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1680/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1680/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1680/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1680/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1680/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1680/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1680/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1680/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
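
Each `global_step_*` directory is a complete Hugging Face checkpoint, so the tokenizer above can be loaded directly once the LFS blobs have been fetched. A minimal sketch (the path is the directory from this diff; the example message is illustrative):

```python
# Minimal sketch: load one checkpoint's tokenizer and render a prompt with its
# chat template. Assumes the LFS-tracked tokenizer.json has been pulled locally.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1680")

messages = [{"role": "user", "content": "What is 2 + 2?"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# With add_generation_prompt=True the rendered string ends in
# "<|Assistant|><think>\n", cueing the model to open with its reasoning trace.
```
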
diff --git a/deepscaler-1.5b-8k/actor/global_step_1700/config.json b/deepscaler-1.5b-8k/actor/global_step_1700/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1700/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
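
The config above pins down the model size exactly. A small sketch (illustrative, not part of the checkpoint) that derives the parameter count from these fields and cross-checks it against the `total_size` reported in this checkpoint's safetensors index further down in the diff:

```python
# Derive the parameter count of a Qwen2-style model from config.json and check
# it against the safetensors index. The hard-coded byte count mirrors this diff.
import json

cfg = json.load(open("deepscaler-1.5b-8k/actor/global_step_1700/config.json"))
h, inter = cfg["hidden_size"], cfg["intermediate_size"]
layers, vocab = cfg["num_hidden_layers"], cfg["vocab_size"]
head_dim = h // cfg["num_attention_heads"]              # 1536 / 12 = 128
kv_dim = cfg["num_key_value_heads"] * head_dim          # 2 * 128 = 256 (GQA)

attn = (h * h + h) + 2 * (h * kv_dim + kv_dim) + h * h  # q/k/v with biases, o without
mlp = 3 * h * inter                                     # gate, up, down projections
per_layer = attn + mlp + 2 * h                          # plus two RMSNorm weights

total = layers * per_layer + 2 * vocab * h + h          # embeddings, untied lm_head, final norm
assert total * 4 == 7_108_352_000                       # float32 bytes == index "total_size"
print(f"{total:,} parameters")                          # 1,777,088,000 (~1.78B)
```
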
diff --git a/deepscaler-1.5b-8k/actor/global_step_1700/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1700/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1700/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
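
These defaults (`do_sample=True`, `temperature=0.6`, `top_p=0.95`) are picked up automatically by `generate()` when the checkpoint is loaded. A usage sketch (the path is from this diff; everything else is illustrative):

```python
# Sketch: sampling from a checkpoint with its shipped generation defaults.
# generation_config.json is read automatically; the kwargs below only make
# those defaults explicit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_1700"
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16)

messages = [{"role": "user", "content": "Compute 12 * 13."}]
input_ids = tok.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
out = model.generate(input_ids, max_new_tokens=256, do_sample=True, temperature=0.6, top_p=0.95)
print(tok.decode(out[0][input_ids.shape[-1]:], skip_special_tokens=True))
```
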
diff --git a/deepscaler-1.5b-8k/actor/global_step_1700/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1700/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f9c858eb83b15582c7348c54f70da184b1520dab
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1700/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:088bfcdd13df3eec75b0b32ebffb7c528ac36a900da419165c5509d4e548340f
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1700/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1700/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..34be52aa566fc763341a61faf1370023a4bd6bab
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1700/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d1955dc73395b039e77fd09d6e994aebaf2caa461b00d394b1ebedfb477af262
+size 2111719976
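
Both weight shards are stored through Git LFS, so a plain clone materializes each `*.safetensors` file as the three-line pointer shown above rather than the ~5 GB and ~2 GB payloads. A small sketch (illustrative) that parses such a pointer:

```python
# Parse a Git LFS pointer file into its fields. Before `git lfs pull`, the
# file on disk is just the 3-line stub recorded in this diff.
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = read_lfs_pointer("deepscaler-1.5b-8k/actor/global_step_1700/model-00001-of-00002.safetensors")
print(ptr["oid"], ptr["size"])  # sha256:088bfc... 4996670464
```
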
diff --git a/deepscaler-1.5b-8k/actor/global_step_1700/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1700/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1700/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
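
The index's `weight_map` is a flat tensor-name-to-shard lookup; note that layer 21 straddles the shard boundary (its attention and gate/up projections sit in shard 1, its `down_proj` and layernorms in shard 2). A sketch (illustrative) of resolving and lazily reading a single tensor:

```python
# Resolve a tensor through model.safetensors.index.json and read only that
# tensor from the correct shard, without loading the rest of the weights.
import json, os
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_1700"
index = json.load(open(os.path.join(ckpt, "model.safetensors.index.json")))

name = "model.layers.21.mlp.down_proj.weight"       # mapped to shard 2 of 2
shard = index["weight_map"][name]
with safe_open(os.path.join(ckpt, shard), framework="pt") as f:
    tensor = f.get_tensor(name)
print(shard, tuple(tensor.shape))                   # model-00002-of-00002.safetensors (1536, 8960)
```
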
diff --git a/deepscaler-1.5b-8k/actor/global_step_1700/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1700/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1700/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
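
One detail worth noting in this file: `pad_token` and `eos_token` are the same string, matching `pad_token_id == eos_token_id == 151643` in `config.json`, so padded positions must be excluded via the attention mask rather than by token id. A quick check (illustrative):

```python
# Confirm that padding and end-of-sentence share one token id in this checkpoint.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1700")
assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
assert tok.pad_token_id == tok.eos_token_id == 151643
```
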
diff --git a/deepscaler-1.5b-8k/actor/global_step_1700/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1700/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1700/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1700/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1700/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1700/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1720/config.json b/deepscaler-1.5b-8k/actor/global_step_1720/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1720/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1720/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1720/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1720/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1720/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1720/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..66872cf727b7dc7351022bd503f793a026097d4b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1720/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19ff631cffb3e82a9e4a221605fcf31a428b8b364e78e78d96c3144d0d81b2be
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1720/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1720/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..583e9a32e49077171b73164b21794c3e054ab972
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1720/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17d2fe0b5fc201e6deb689a5748dbe8cd3302b8a82bb1238b2d2226d23fb52d3
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1720/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1720/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1720/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
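
The weight_map above is the contract a sharded-checkpoint loader works from: every tensor name resolves to exactly one shard file, and a layer may straddle shards (layer 21's gate/up projections sit in model-00001 while its layernorms and down_proj land in model-00002). A minimal sketch of resolving a single tensor through the index, assuming the LFS objects have been pulled and using the step-1720 directory as an illustrative path:

import json
from safetensors import safe_open

CKPT = "deepscaler-1.5b-8k/actor/global_step_1720"

with open(f"{CKPT}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.21.mlp.down_proj.weight"
shard = index["weight_map"][name]   # -> "model-00002-of-00002.safetensors"

with safe_open(f"{CKPT}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)     # reads only this tensor, not the whole shard
print(tensor.shape)                 # torch.Size([1536, 8960]) for this config
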
diff --git a/deepscaler-1.5b-8k/actor/global_step_1720/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1720/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1720/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
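
Note that pad_token is aliased to <|end▁of▁sentence|>, so padded positions are indistinguishable from genuine end-of-sequence at the token level, and downstream code has to rely on the attention mask. A short sketch of the consequence (hedged: assumes transformers is installed and the checkpoint, including the LFS-tracked tokenizer.json, is local):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1720")
assert tok.pad_token_id == tok.eos_token_id == 151643  # pad aliases eos

batch = tok(["short", "a somewhat longer prompt"],
            padding=True, return_tensors="pt")
print(batch["attention_mask"])  # zeros mark padding; the token ids alone cannot
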
diff --git a/deepscaler-1.5b-8k/actor/global_step_1720/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1720/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1720/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
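
As with every large file in this change, tokenizer.json is checked in as a Git LFS pointer: the three lines above (spec version, sha256 oid, byte size) are the entire stored content, and git lfs pull swaps them for the real 11 MB file. A purely illustrative sketch of verifying a downloaded blob against its pointer:

import hashlib
from pathlib import Path

def verify_lfs(pointer_path: str, blob_path: str) -> bool:
    """Check a blob against the oid and size recorded in its LFS pointer."""
    fields = dict(line.split(" ", 1)
                  for line in Path(pointer_path).read_text().splitlines() if line)
    oid = fields["oid"].removeprefix("sha256:")
    blob = Path(blob_path).read_bytes()
    return len(blob) == int(fields["size"]) and hashlib.sha256(blob).hexdigest() == oid
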
diff --git a/deepscaler-1.5b-8k/actor/global_step_1720/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1720/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1720/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
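
The chat_template above is the Jinja program that apply_chat_template executes: it emits the system prompt right after BOS, wraps turns in <|User|>/<|Assistant|> markers, drops everything up to the final closing </think> from earlier assistant turns, and, when a generation prompt is requested, ends with <|Assistant|><think>\n so the model starts inside a reasoning block. A minimal rendering sketch under the same local-checkpoint assumption as above:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1720")
messages = [
    {"role": "system", "content": "You are a careful math assistant."},
    {"role": "user", "content": "What is 12 * 34?"},
]
prompt = tok.apply_chat_template(messages, tokenize=False,
                                 add_generation_prompt=True)
print(prompt)
# <|begin▁of▁sentence|>You are a careful math assistant.<|User|>What is 12 * 34?<|Assistant|><think>\n
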
diff --git a/deepscaler-1.5b-8k/actor/global_step_1740/config.json b/deepscaler-1.5b-8k/actor/global_step_1740/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1740/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
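
This config pins down the distilled Qwen2 architecture: 28 layers, hidden size 1536, grouped-query attention with 12 query heads but only 2 key/value heads (head_dim 128), gated MLPs of width 8960, and untied input/output embeddings over a 151936-token vocabulary. The parameter count it implies can be checked against the index's total_size of 7,108,352,000 bytes, which at float32 (4 bytes per parameter) is exactly 1,777,088,000 parameters:

# Back-of-envelope parameter count from the config fields above.
h, inter, L, V = 1536, 8960, 28, 151936
n_kv, head_dim = 2, 128                       # GQA: k/v width is 2 * 128 = 256

attn = 2 * h * h + 2 * h * (n_kv * head_dim)  # q/o plus k/v projection weights
attn += h + 2 * (n_kv * head_dim)             # q, k, v biases (o_proj has none)
mlp = 3 * h * inter                           # gate, up, down projections
per_layer = attn + mlp + 2 * h                # plus the two RMSNorm weights

total = L * per_layer + 2 * V * h + h         # embed_tokens, lm_head, final norm
assert total == 7_108_352_000 // 4            # 1,777,088,000 parameters
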
diff --git a/deepscaler-1.5b-8k/actor/global_step_1740/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1740/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1740/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
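
generation_config.json records the sampling defaults recommended for the R1 distills (sampling on, temperature 0.6, top_p 0.95); transformers' generate() picks them up automatically when loading from this directory. A hedged end-to-end sketch, assuming the step-1740 checkpoint is local and LFS-pulled:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_1740"
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)

messages = [{"role": "user", "content": "Compute 7 * 8 + 3."}]
inputs = tok.apply_chat_template(messages, add_generation_prompt=True,
                                 return_tensors="pt")
out = model.generate(inputs, max_new_tokens=256)  # sampling params from config
print(tok.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
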
diff --git a/deepscaler-1.5b-8k/actor/global_step_1740/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1740/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..17fc80cc2b337808cfa6cf3e07d94fbaf7b5e134
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1740/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c965ce2f41c404cac839790998795ad482b7eb57559c0e4ab0b4bc94cba1c048
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1740/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1740/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..cc53a5a5c29bf62113a21840de58a3ad1f6f0a06
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1740/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d060753f3b08f81fc1f6cbb6c34e2ae2944cfa0631af3181abcfe0c61d4c001
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1740/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1740/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1740/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1740/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1740/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1740/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1740/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1740/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1740/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1740/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1740/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1740/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1760/config.json b/deepscaler-1.5b-8k/actor/global_step_1760/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1760/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1760/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1760/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1760/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1760/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1760/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8c735d39cdda7dc358dd5291bce1a7f0d684fe0b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1760/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fafe3c8fa80ff49aeec25ecbbf2311cf57347f7a3624ecb32a3f2ccbf7163ca
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1760/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1760/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6461e0661b10023a6a145d98eb516a7108efb76e
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1760/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b603aebe1af365eab3bfb5e754126745c5e34c07bc40f7f7088b7c99c15eddc
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1760/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1760/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1760/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1760/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1760/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1760/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1760/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1760/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1760/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1760/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1760/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1760/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1780/config.json b/deepscaler-1.5b-8k/actor/global_step_1780/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1780/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1780/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1780/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1780/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1780/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1780/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6a556d6c705e78afe9eca7bf2f538f8f6f5590a3
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1780/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0662c3a0d975d1146a5de5b49467160f0c687089ccead8cf0a4b99ed043e38ce
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1780/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1780/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4d3dd9848a01fbb819a8be9a2e5126eb783a5942
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1780/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d4274bbbb893e66b011ff0306fec12c679b8f687c02f42cae4d7b04ee0139d1
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1780/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1780/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1780/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1780/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1780/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1780/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1780/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1780/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1780/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1780/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1780/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1780/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_180/config.json b/deepscaler-1.5b-8k/actor/global_step_180/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_180/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_180/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_180/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_180/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_180/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_180/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b5a9f51b3fb75e53ad665af8037c50d187336e88
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_180/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5dda1e0b0b822813ea341c0221ccf01e69f0e09c42c38abaa0eaf52ca127061a
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_180/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_180/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..efb96986cd8a7c9f2ffdb2d6d4ee46662198dc0a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_180/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fab0e8c04b9f4895ad147918f3336e520358ca093e9d78b5e7a5765ba9fc9932
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_180/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_180/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_180/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_180/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_180/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_180/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_180/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_180/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_180/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_180/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_180/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_180/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1800/config.json b/deepscaler-1.5b-8k/actor/global_step_1800/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1800/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1800/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1800/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1800/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1800/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1800/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6a7ef5553b5f2437432fc38cbc3c01ab78e42f03
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1800/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c24d316b62c1d92c0d61c47f2f706c0cf899903189c3bf97623a610924e2b040
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1800/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1800/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..010fdcf8d6d34e1c3e97dd32e789a6b447913dbb
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1800/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b9cf2116d3290d1083218e62912dfe7870194078e0c9f7deaa5b90b37f49e70
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1800/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1800/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1800/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1800/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1800/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1800/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1800/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1800/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1800/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1800/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1800/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1800/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1820/config.json b/deepscaler-1.5b-8k/actor/global_step_1820/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1820/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1820/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1820/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1820/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1820/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1820/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..08df7a6c7ed8c3e7488160511bd8b44a3d8076f7
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1820/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b0c77787782218bbe50d05417ec8677e7deff6df425a4c254c095529430ba5e
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1820/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1820/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a33b474e9dd1e7125ef467d1babcb67a69bf41e5
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1820/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d8da6b3304d607bf05ead218381ee66aa93d5c7f0cce83f837ce6394553b4ce
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1820/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1820/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1820/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
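The index above maps every tensor to one of the two shards; note that layer 21 straddles the shard boundary (its attention projections and gate/up live in shard 1, its down projection and both layernorms in shard 2). transformers resolves the map automatically, but the lookup is easy to do by hand. A minimal sketch, assuming the LFS objects have been pulled so the *.safetensors files hold real weights rather than pointer stubs; the checkpoint path is illustrative:

import json
from pathlib import Path

from safetensors import safe_open  # pip install safetensors

# Illustrative local checkout of this diff, with `git lfs pull` already run.
ckpt = Path("deepscaler-1.5b-8k/actor/global_step_1820")
index = json.loads((ckpt / "model.safetensors.index.json").read_text())

name = "model.layers.21.mlp.down_proj.weight"  # one of the shard-2 tensors
shard = index["weight_map"][name]              # "model-00002-of-00002.safetensors"
with safe_open(str(ckpt / shard), framework="pt") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape), tensor.dtype)  # (1536, 8960) torch.float32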
diff --git a/deepscaler-1.5b-8k/actor/global_step_1820/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1820/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1820/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
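special_tokens_map.json pins the BOS/EOS pair and aliases the pad token to EOS, so padded batches terminate on the same id (151643) the model stops on. A small check, assuming a local checkout with the LFS-tracked tokenizer.json pulled (path illustrative):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1820")
assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
print(tok.bos_token_id, tok.eos_token_id, tok.pad_token_id)  # 151646 151643 151643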
diff --git a/deepscaler-1.5b-8k/actor/global_step_1820/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1820/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1820/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
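As elsewhere in this diff, the tokenizer.json blob is a three-line Git LFS pointer (spec version, sha256 oid, byte size), not the ~11 MB tokenizer itself. Parsing that format is a quick way to tell stubs from pulled files; a sketch with an illustrative path:

from pathlib import Path

def read_lfs_pointer(path):
    # Parse the three-line Git LFS pointer format; return None if the file
    # already holds real content rather than a stub.
    text = Path(path).read_text(errors="ignore")
    if not text.startswith("version https://git-lfs.github.com/spec/v1"):
        return None
    return dict(line.split(" ", 1) for line in text.splitlines() if line)

ptr = read_lfs_pointer("deepscaler-1.5b-8k/actor/global_step_1820/tokenizer.json")
if ptr is not None:
    print(ptr["oid"], int(ptr["size"]))  # sha256:e20ddafc... 11422778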
diff --git a/deepscaler-1.5b-8k/actor/global_step_1820/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1820/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1820/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
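The chat template above wraps turns in <|User|>/<|Assistant|> markers, strips everything up to the closing </think> from earlier assistant messages so stale reasoning never re-enters the context, and, when a generation prompt is requested, ends with <|Assistant|><think>\n to open a fresh reasoning block. A rendering sketch (illustrative path; the expected output assumes the template as reconstructed here):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1820")
msgs = [{"role": "user", "content": "What is 7 * 8?"}]
prompt = tok.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>\n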
diff --git a/deepscaler-1.5b-8k/actor/global_step_1840/config.json b/deepscaler-1.5b-8k/actor/global_step_1840/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1840/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
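config.json pins the distilled architecture: 28 Qwen2 layers, hidden size 1536, intermediate size 8960, 12 query heads against 2 key/value heads (grouped-query attention), untied embeddings, float32 storage. These fields fully determine the checkpoint size, and the arithmetic reproduces the index's total_size of 7108352000 bytes exactly; a sanity-check sketch from the values above:

h, inter, layers, vocab = 1536, 8960, 28, 151936
q_heads, kv_heads = 12, 2
head_dim = h // q_heads        # 128
kv = kv_heads * head_dim       # 256

attn = (h * h + h) + 2 * (h * kv + kv) + h * h  # q (+bias), k/v (+bias), o
mlp = 3 * h * inter                             # gate, up, down projections
norms = 2 * h                                   # input + post-attention RMSNorm
per_layer = attn + mlp + norms

# Untied embeddings mean embed_tokens and lm_head are counted separately.
params = layers * per_layer + 2 * vocab * h + h
print(params, params * 4)  # 1777088000 params -> 7108352000 bytes in float32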
diff --git a/deepscaler-1.5b-8k/actor/global_step_1840/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1840/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1840/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
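generation_config.json fixes the sampling defaults: do_sample with temperature 0.6 and top_p 0.95, matching the settings DeepSeek recommends for the R1 distills. generate picks these up automatically from the checkpoint directory; a usage sketch with an illustrative path, passing the values explicitly for clarity:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_1840"
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16)

inputs = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 12 + 35."}],
    add_generation_prompt=True, return_tensors="pt",
)
out = model.generate(inputs, do_sample=True, temperature=0.6, top_p=0.95,
                     max_new_tokens=256)
print(tok.decode(out[0], skip_special_tokens=True))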
diff --git a/deepscaler-1.5b-8k/actor/global_step_1840/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1840/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..24c0506060756c4269c76ba55e733777970939ea
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1840/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:504da7a520d29a54f1f17e3e3c63ec2600b5ce1179cd03e5fb8f586cd5e6f5e3
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1840/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1840/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2c0a809b0d4807ca1b6c0df500dd65be41596448
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1840/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcc3c47ff2439f6c2765df6da16b5f1db7c7201ab082286187984aabe31582a8
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1840/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1840/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1840/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1840/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1840/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1840/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1840/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1840/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1840/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1840/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1840/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1840/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1860/config.json b/deepscaler-1.5b-8k/actor/global_step_1860/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1860/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1860/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1860/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1860/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1860/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1860/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c63d98298ec75ef8f84ef706f89a93a72c7c8e34
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1860/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d24f39c42f678a024d9f89afe4182beb0962fa70739eeeb0634c546a8f8979e8
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1860/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1860/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f6fbdf1265eaa3e36d6edb084fa1f4a1ebbb5202
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1860/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00da7cd2b68b5c8d1c3f97b3c2e7a0b0e01ab946e6bc3dc8f56a566aedbbee46
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1860/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1860/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1860/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1860/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1860/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1860/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1860/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1860/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1860/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1860/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1860/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1860/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+      "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+      "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+      "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+      "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1880/config.json b/deepscaler-1.5b-8k/actor/global_step_1880/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1880/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1880/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1880/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1880/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1880/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1880/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..708f2cd8066aef764585da09362d5483f856565b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1880/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d278a24e1818a4f9de53d8f5a437a3cd92cc5b4b637e09d7ed698eb7a1030527
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1880/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1880/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..33e2ab1dc91774d2ff32bffde301e3968424eec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1880/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01bf5584b212c3cbe8bfeadce99a53b45306ea4de3a854d1a5e0a049a8a3aa3f
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1880/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1880/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1880/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
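The `weight_map` above is the lookup table that safetensors-aware loaders use to resolve a tensor name to the shard file that stores it. A minimal sketch of that lookup in Python, assuming the `*.safetensors` LFS objects for a checkpoint directory have actually been fetched (the `load_tensor` helper is hypothetical, for illustration only):

```python
import json
import os

from safetensors import safe_open

def load_tensor(ckpt_dir: str, name: str):
    """Resolve `name` through model.safetensors.index.json and read it."""
    with open(os.path.join(ckpt_dir, "model.safetensors.index.json")) as f:
        shard = json.load(f)["weight_map"][name]  # e.g. "model-00002-of-00002.safetensors"
    with safe_open(os.path.join(ckpt_dir, shard), framework="pt") as f:
        return f.get_tensor(name)

# The final RMSNorm, for instance, lives in the second shard:
# load_tensor("deepscaler-1.5b-8k/actor/global_step_1880", "model.norm.weight")
```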
diff --git a/deepscaler-1.5b-8k/actor/global_step_1880/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1880/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1880/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
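One detail worth flagging: `pad_token` reuses the EOS string. These decoder-only checkpoints define no dedicated padding token, so padding with `<|end▁of▁sentence|>` (kept out of attention and loss by the mask) is the usual workaround. A quick check, assuming `transformers` is installed and the LFS files are present:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1880")
assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
```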
diff --git a/deepscaler-1.5b-8k/actor/global_step_1880/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1880/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1880/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
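As with every `tokenizer.json` in this diff, what is committed is a Git LFS pointer, not the ~11 MB tokenizer itself: three lines giving the pointer-spec version, the SHA-256 of the real blob, and its byte size. `git lfs pull` replaces pointers with content; for inspection without pulling, a pointer parses trivially (hypothetical helper, not part of the repo):

```python
def parse_lfs_pointer(path: str) -> dict:
    """Parse the three key-value lines of a Git LFS pointer file."""
    with open(path) as f:
        fields = dict(line.strip().split(" ", 1) for line in f if line.strip())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),  # 11422778 bytes for this tokenizer.json
    }
```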
diff --git a/deepscaler-1.5b-8k/actor/global_step_1880/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1880/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1880/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
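The `chat_template` above is what `apply_chat_template` renders. Its `'</think>'` split strips any leftover reasoning trace from earlier assistant turns before they are re-serialized into the prompt, and `add_generation_prompt` appends the trailing `<|Assistant|>\n`. A usage sketch, assuming `transformers` (with Jinja templating) and fetched LFS files:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1880")
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "What is 7 * 8?"}],
    tokenize=False,
    add_generation_prompt=True,
)
# Roughly: "<|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|>\n"
print(prompt)
```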
diff --git a/deepscaler-1.5b-8k/actor/global_step_1900/config.json b/deepscaler-1.5b-8k/actor/global_step_1900/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1900/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
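These hyperparameters line up exactly with the `total_size` of 7,108,352,000 bytes reported in the index files: with `torch_dtype: float32` each parameter takes 4 bytes, and the shapes implied by the config sum to 1,777,088,000 parameters, i.e. the ~1.78B of this Qwen2 architecture. A worked check:

```python
# Parameter count implied by config.json vs. the index metadata.
h, inter, n_layers, vocab = 1536, 8960, 28, 151936
q_dim, kv_dim = 12 * 128, 2 * 128              # head_dim = 1536 / 12 = 128

attn = (h * q_dim + q_dim) + 2 * (h * kv_dim + kv_dim) + q_dim * h  # q/k/v (+bias), o
mlp = 3 * h * inter                             # gate_proj, up_proj, down_proj
per_layer = attn + mlp + 2 * h                  # plus two RMSNorm weight vectors

total = 2 * vocab * h + n_layers * per_layer + h  # embed + untied lm_head + final norm
assert total == 1_777_088_000
assert total * 4 == 7_108_352_000               # float32: 4 bytes/param == total_size
```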
diff --git a/deepscaler-1.5b-8k/actor/global_step_1900/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1900/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1900/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
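These sampling defaults (`do_sample` at temperature 0.6 and top-p 0.95, the values DeepSeek recommends for its R1 distills) are picked up automatically by `model.generate` when the checkpoint is loaded. A minimal end-to-end sketch, assuming a locally fetched checkpoint:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_1900"   # any step directory works
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt)    # also reads generation_config.json

ids = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 12 + 30."}],
    add_generation_prompt=True,
    return_tensors="pt",
)
out = model.generate(ids, max_new_tokens=256)         # sampled at T=0.6, top_p=0.95
print(tok.decode(out[0][ids.shape[-1]:], skip_special_tokens=True))
```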
diff --git a/deepscaler-1.5b-8k/actor/global_step_1900/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1900/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9eae3af813bd2bff846b9ad4bc7f6c43b749ef00
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1900/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:642ceab2e58838463c0c4757b6bec0b46347e225c259d48e149fee84128fcf89
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1900/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1900/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..49277b45389de270e9deaa38c3b2d30e0ad04fa7
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1900/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01ef14cae57da0f3cfdc181ef8707b6963696277cc12b5476d709d970e142621
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1900/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1900/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1900/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
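The same 346-line index recurs for every checkpoint step; note that layer 21 straddles the shard boundary, which is why its entries mix both files. A small sanity check that an index covers everything `config.json` declares (28 layers plus embeddings, final norm, and the untied `lm_head`):

```python
import json

ckpt = "deepscaler-1.5b-8k/actor/global_step_1900"
with open(f"{ckpt}/model.safetensors.index.json") as f:
    wm = json.load(f)["weight_map"]

layer_ids = {int(k.split(".")[2]) for k in wm if k.startswith("model.layers.")}
assert layer_ids == set(range(28))                    # num_hidden_layers = 28
assert {"lm_head.weight", "model.embed_tokens.weight", "model.norm.weight"} <= wm.keys()
print(sorted(set(wm.values())))                       # the two shard files above
```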
diff --git a/deepscaler-1.5b-8k/actor/global_step_1900/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1900/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1900/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1900/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1900/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1900/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1900/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1900/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1900/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1920/config.json b/deepscaler-1.5b-8k/actor/global_step_1920/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1920/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1920/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1920/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1920/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1920/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1920/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bbb21bd45926263772728322993705f00fa51308
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1920/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5bb76fa2b252a61bae0f37d1b466660438e3d4fa5a2d1afa11770c05dcecbcd6
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1920/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1920/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..718e80c5109f041989ca74343d6a7ee9d7e324f4
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1920/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a0077c2ca7b3153be11dac4d98ff6bf3e42b91e11fdbd29a82bbff37acad544
+size 2111719976
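
Both shards are committed as Git LFS pointers: three lines giving the spec version, a sha256 OID, and the byte size. After `git lfs pull`, a materialized blob can be checked against its pointer; a sketch using the values from the first shard above (the local path is an assumption):

# Verify a downloaded LFS blob against its pointer (sha256 + size).
import hashlib, os

path = "deepscaler-1.5b-8k/actor/global_step_1920/model-00001-of-00002.safetensors"
expected_oid = "5bb76fa2b252a61bae0f37d1b466660438e3d4fa5a2d1afa11770c05dcecbcd6"
expected_size = 4996670464

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("shard matches its LFS pointer")
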
diff --git a/deepscaler-1.5b-8k/actor/global_step_1920/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1920/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1920/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
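
The `weight_map` above is what sharded loaders consult to locate each tensor, and `total_size` (7,108,352,000 bytes) is the float32 footprint of all tensors. Resolving one tensor by hand, as a sketch (directory path assumed; requires the safetensors package):

# Sketch: look up a tensor's shard via the index, then read it.
import json, os
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_1920"  # illustrative path
with open(os.path.join(ckpt, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "model.layers.21.mlp.down_proj.weight"       # lives in shard 2, per the map above
shard = index["weight_map"][name]                   # "model-00002-of-00002.safetensors"
with safe_open(os.path.join(ckpt, shard), framework="pt") as f:
    print(shard, tuple(f.get_tensor(name).shape))   # expect (1536, 8960)
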
diff --git a/deepscaler-1.5b-8k/actor/global_step_1920/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1920/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1920/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1920/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1920/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1920/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1920/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1920/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1920/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1940/config.json b/deepscaler-1.5b-8k/actor/global_step_1940/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1940/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1940/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1940/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1940/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1940/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1940/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..94deafbca4c2d9f7caf7d3c64a106966a78e3894
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1940/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bcbffa6b95e14bc73ec1d6ccef61b789f7eca583cadf6ebba5b98b92427bd6ac
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1940/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1940/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5fbfcd3079fa4e5b0121bf16b80ed08ee18a5f4c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1940/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae0e6a97534750c35b60da3bac96bc48c709cfa7e19cd5c3dc2c55025bd8065a
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1940/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1940/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1940/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
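
The `model.safetensors.index.json` files in these checkpoints map every tensor name to the shard that stores it, which lets loaders fetch a single tensor without reading both shard files. A minimal sketch of that lookup, assuming a local copy of the checkpoint directory (the path and tensor name below are illustrative; `safetensors.safe_open` is the standard lazy, per-tensor read API):

```python
# Resolve a tensor to its shard via model.safetensors.index.json and load
# only that tensor. ckpt_dir is a hypothetical local path.
import json
from safetensors import safe_open

ckpt_dir = "deepscaler-1.5b-8k/actor/global_step_1940"
with open(f"{ckpt_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.23.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00002.safetensors"
with safe_open(f"{ckpt_dir}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)    # reads just this tensor from the shard
print(name, tuple(tensor.shape))
```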
diff --git a/deepscaler-1.5b-8k/actor/global_step_1940/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1940/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1940/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
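
One design choice visible in `special_tokens_map.json`: `pad_token` reuses the eos token, a common convention for decoder-only models pretrained without a dedicated padding token. A quick check, assuming the checkpoint directory is available locally:

```python
# Sketch: confirm pad == eos for this tokenizer (path is hypothetical).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1940")
assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
print(tok.bos_token, tok.eos_token, tok.pad_token_id)  # pad id == eos id 151643
```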
diff --git a/deepscaler-1.5b-8k/actor/global_step_1940/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1940/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1940/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
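
The `tokenizer.json` entries above are git-lfs pointer stubs, not the real file: three `key value` lines recording the spec version, a sha256 object id, and the payload size (here ~11 MB). A small parser sketch for this format (the function name is illustrative):

```python
# Parse a git-lfs pointer file like the tokenizer.json stub above.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"].startswith("https://git-lfs.github.com/spec/")
    algo, digest = fields["oid"].split(":", 1)
    return {"algo": algo, "digest": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
size 11422778"""
print(parse_lfs_pointer(pointer))  # {'algo': 'sha256', 'digest': 'e20d...', 'size': 11422778}
```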
diff --git a/deepscaler-1.5b-8k/actor/global_step_1940/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1940/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1940/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
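
The `chat_template` field above is a Jinja template that transformers renders through `apply_chat_template`; with `add_generation_prompt=True` it appends the `<|Assistant|><think>\n` generation prefix, and when re-templating past assistant turns it drops everything up to `</think>`. A minimal rendering sketch, assuming the checkpoint directory is available locally:

```python
# Render the chat template above (path is hypothetical).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1940")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2+2?"},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# Roughly: "<|begin▁of▁sentence|>You are a helpful assistant.<|User|>What is 2+2?<|Assistant|><think>\n"
print(prompt)
```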
diff --git a/deepscaler-1.5b-8k/actor/global_step_1960/config.json b/deepscaler-1.5b-8k/actor/global_step_1960/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1960/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
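
This `config.json` describes Qwen2-style grouped-query attention: 12 query heads but only 2 key/value heads over a 1536-wide hidden state, so each KV head serves 6 query heads and `k_proj`/`v_proj` project down to 256 dimensions (which is why their weights and biases in the index above are much smaller than `q_proj`'s). A back-of-envelope check:

```python
# Attention geometry implied by config.json above.
hidden_size = 1536
num_attention_heads = 12
num_key_value_heads = 2

head_dim = hidden_size // num_attention_heads            # 128
q_out = num_attention_heads * head_dim                   # 1536 (q_proj output dim)
kv_out = num_key_value_heads * head_dim                  # 256  (k_proj/v_proj output dim)
group_size = num_attention_heads // num_key_value_heads  # 6 query heads per KV head
print(head_dim, q_out, kv_out, group_size)
```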
diff --git a/deepscaler-1.5b-8k/actor/global_step_1960/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1960/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1960/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
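
`generation_config.json` pins the decoding defaults: sampling on, temperature 0.6, top-p 0.95. `generate()` picks these up from the checkpoint automatically; the sketch below passes them explicitly only to make the defaults visible, and assumes a local copy of the checkpoint:

```python
# Sampling with the decoding defaults above (path is hypothetical).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_1960"
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16)

inputs = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 17 * 24."}],
    add_generation_prompt=True,
    return_tensors="pt",
)
out = model.generate(inputs, do_sample=True, temperature=0.6, top_p=0.95,
                     max_new_tokens=256)
print(tok.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```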
diff --git a/deepscaler-1.5b-8k/actor/global_step_1960/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1960/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..57c37ad34fa45aea90c9838331fa750066e090b9
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1960/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29ce9921b091d906a12bf398b4125b56b394667eb0fd1a7931ff3527e5638a43
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1960/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1960/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9ec79e6ca60e8aef8b061067dccf0fade6fe02a7
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1960/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4afcea08a2f949cafb6ad13fac52d6dbe84635481bfc8127aaf07230231d82b
+size 2111719976
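
The two LFS pointers give on-disk shard sizes, while `metadata.total_size` in the index below counts raw tensor bytes only; the gap should be the per-shard safetensors JSON headers. The arithmetic, using the numbers as recorded here, also recovers the parameter count from the float32 dtype:

```python
# Cross-check shard sizes (from the LFS pointers) against the index metadata.
shard_file_sizes = [4996670464, 2111719976]  # on-disk bytes per shard
index_total_size = 7108352000                # metadata.total_size: tensor bytes only

header_overhead = sum(shard_file_sizes) - index_total_size
print(header_overhead)            # 38440 bytes of safetensors headers across shards
print(index_total_size // 4)      # 1777088000 float32 params, i.e. ~1.78B
```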
diff --git a/deepscaler-1.5b-8k/actor/global_step_1960/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1960/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1960/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
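
A quick way to validate an index like the one above is to reconstruct the expected tensor set from `config.json` (28 hidden layers, 12 tensors per layer, plus the embedding, final norm, and the untied `lm_head`) and compare. A sketch, assuming the index file sits in the working directory:

```python
# Sanity-check weight_map coverage for a 28-layer Qwen2 checkpoint.
import json

with open("model.safetensors.index.json") as f:  # hypothetical working dir
    wmap = json.load(f)["weight_map"]

per_layer = [
    "input_layernorm.weight", "post_attention_layernorm.weight",
    "mlp.down_proj.weight", "mlp.gate_proj.weight", "mlp.up_proj.weight",
    "self_attn.q_proj.weight", "self_attn.q_proj.bias",
    "self_attn.k_proj.weight", "self_attn.k_proj.bias",
    "self_attn.v_proj.weight", "self_attn.v_proj.bias",
    "self_attn.o_proj.weight",
]
expected = {"lm_head.weight", "model.embed_tokens.weight", "model.norm.weight"}
expected |= {f"model.layers.{i}.{s}" for i in range(28) for s in per_layer}
assert set(wmap) == expected, set(wmap) ^ expected
print(len(expected))  # 339 tensors spread over the two shards
```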
diff --git a/deepscaler-1.5b-8k/actor/global_step_1960/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1960/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1960/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1960/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1960/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1960/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1960/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1960/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1960/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1980/config.json b/deepscaler-1.5b-8k/actor/global_step_1980/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1980/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1980/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_1980/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1980/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_1980/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1980/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..22c943522def7440350fa53873d82c4429686633
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1980/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ba170a7dd182476c59829630d4d7603a4fa5bde33a9faecd9a213ab82b0cd83
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_1980/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_1980/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..aa96aa3687842b21ed65201609c91c769f5c6b27
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1980/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:136f1ded5aedaf36c3542ff8a7fbd43b3fe046f8ebc521af57552792dbd8e730
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_1980/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_1980/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1980/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
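
The `weight_map` in this index tells a loader which shard holds each tensor; note how layer 21 straddles the shard boundary (its layernorms and down_proj sit in shard 2, the rest in shard 1). A sketch of reading a single tensor via the index with the `safetensors` library, without loading the full model:

```python
# Sketch: resolve one tensor to its shard via model.safetensors.index.json,
# then read just that tensor with safetensors.safe_open.
import json, os
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_1980"
with open(os.path.join(ckpt, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "model.layers.21.input_layernorm.weight"   # in shard 2 per the map above
shard = index["weight_map"][name]                 # -> "model-00002-of-00002.safetensors"
with safe_open(os.path.join(ckpt, shard), framework="pt") as f:
    tensor = f.get_tensor(name)
print(shard, tuple(tensor.shape))                 # expect (1536,)
```
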
diff --git a/deepscaler-1.5b-8k/actor/global_step_1980/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_1980/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1980/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
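
Note that this special_tokens_map.json reuses `<|end▁of▁sentence|>` as both EOS and PAD, matching `pad_token_id == eos_token_id == 151643` in config.json. A tiny sketch confirming the wiring through the tokenizer API:

```python
# Sketch: EOS and PAD resolve to the same token and id as in config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_1980")
assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
assert tok.convert_tokens_to_ids(tok.eos_token) == 151643
print(tok.bos_token, tok.eos_token)
```
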
diff --git a/deepscaler-1.5b-8k/actor/global_step_1980/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_1980/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1980/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_1980/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_1980/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_1980/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_20/config.json b/deepscaler-1.5b-8k/actor/global_step_20/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_20/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_20/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_20/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_20/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_20/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_20/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..be3e02863b78897e30d2f3f51ad0e740e52babc2
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_20/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c14e42a3e2a1d3ad9902a058c5f9c3cf6585ef393c0421a4798bc59cffba3b7
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_20/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_20/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bffded9ff112c0a2c3a5a7d8227d79095d27bcfd
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_20/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:319a43ff977414a97d1a051b3315f12ae821954d4058664b0bc2eafb44d71e82
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_20/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_20/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_20/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_20/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_20/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_20/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_20/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_20/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_20/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_20/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_20/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_20/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_200/config.json b/deepscaler-1.5b-8k/actor/global_step_200/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_200/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_200/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_200/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_200/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_200/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_200/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ad9f2d1aecccd334c728f6fb7d3dc75db89d4667
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_200/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd7f42c10600b979d97f0369d7fb29c8ef72899c5b5d9503facdf0c32c43b19c
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_200/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_200/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9a6d9b55d11d5e973ee9c85fa9ab938ba09f803f
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_200/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fa7b30e2a62eba34fa8fbf6185ee197d5128aedfd01b81d5dbbbc9bed1145ae
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_200/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_200/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_200/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_200/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_200/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_200/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_200/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_200/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_200/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_200/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_200/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_200/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2000/config.json b/deepscaler-1.5b-8k/actor/global_step_2000/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2000/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2000/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2000/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2000/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2000/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2000/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..be9879c1679479755bde44e6de3e7bffb7a2c557
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2000/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b04145db0e7f24a79b4bd659f2f3db12e8c33cdf96d06ee362a859770bec5f0
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2000/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2000/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5438dd76ee93c08e5caf476ed6aa0d8b3598078a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2000/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8e37ab398860f61bea5182d37fed069c8bd41d672aa79f95fda11ca53aa38c2
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2000/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2000/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2000/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2000/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2000/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2000/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2000/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2000/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2000/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2000/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2020/config.json b/deepscaler-1.5b-8k/actor/global_step_2020/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2020/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2020/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2020/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2020/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2020/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2020/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ccda5fb356cff4f30599a2999dc802a0070a5b94
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2020/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e6ae4cb67e09f97b20a8d1470dd210927a4614dc43025564d5ef9225cb7b168
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2020/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2020/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7c3e88459fd01879cbf180006b3d47bbcd8f035c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2020/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c045b7dd72a7bfdd16c640c18bc8ae9b95fa384d3d8045f259af4bd3e2e459c3
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2020/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2020/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2020/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
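
The index above is what lets loaders find each tensor without opening every shard: `weight_map` maps a parameter name to the shard file that stores it, and `metadata.total_size` records the combined byte size. A minimal sketch of resolving one tensor by hand, assuming the checkpoint directory has been checked out locally (the path below is illustrative, not part of the diff):

```python
import json
from safetensors import safe_open

ckpt_dir = "deepscaler-1.5b-8k/actor/global_step_2020"  # hypothetical local checkout
with open(f"{ckpt_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.24.mlp.up_proj.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00002.safetensors"
with safe_open(f"{ckpt_dir}/{shard}", framework="pt") as st:
    tensor = st.get_tensor(name)
print(tensor.shape)  # torch.Size([8960, 1536]): intermediate_size x hidden_size
```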
diff --git a/deepscaler-1.5b-8k/actor/global_step_2020/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2020/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2020/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
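
As declared above, this checkpoint reuses `<|end▁of▁sentence|>` as both `eos_token` and `pad_token`, so padded batches rely on the attention mask rather than a dedicated pad id. A minimal sketch, assuming the same hypothetical local checkout:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "deepscaler-1.5b-8k/actor/global_step_2020")  # hypothetical local checkout
assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
batch = tok(["1+1=", "What is 7 * 8?"], padding=True, return_tensors="pt")
print(batch["attention_mask"])  # zeros mark the padded positions
```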
diff --git a/deepscaler-1.5b-8k/actor/global_step_2020/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2020/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2020/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
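
`tokenizer.json` is stored as a git-lfs pointer: the three lines above hold only the spec version, the sha256 of the real ~11.4 MB payload, and its byte size; the payload itself arrives via `git lfs pull`. A minimal sketch of reading the pointer fields directly (path illustrative):

```python
def parse_lfs_pointer(path: str) -> tuple[str, int]:
    """Return (sha256 hex digest, byte size) from a git-lfs pointer file."""
    fields = dict(line.split(" ", 1) for line in open(path).read().splitlines())
    return fields["oid"].removeprefix("sha256:"), int(fields["size"])

oid, size = parse_lfs_pointer(
    "deepscaler-1.5b-8k/actor/global_step_2020/tokenizer.json")
print(oid[:12], size)  # e20ddafc659b 11422778
```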
diff --git a/deepscaler-1.5b-8k/actor/global_step_2020/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2020/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2020/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
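
The `chat_template` above is the DeepSeek-R1 Jinja template: it hoists the system prompt to the front, wraps turns in `<|User|>`/`<|Assistant|>` markers, strips everything up to `</think>` from prior assistant turns, and, when `add_generation_prompt` is set, ends the prompt with `<|Assistant|><think>\n` so the model opens with its reasoning block. A minimal sketch of rendering it through transformers (path illustrative):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "deepscaler-1.5b-8k/actor/global_step_2020")  # hypothetical local checkout
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "What is 7 * 8?"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>\n
```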
diff --git a/deepscaler-1.5b-8k/actor/global_step_2040/config.json b/deepscaler-1.5b-8k/actor/global_step_2040/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2040/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
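
This `config.json` pins the checkpoint to the Qwen2 architecture of DeepSeek-R1-Distill-Qwen-1.5B: 28 layers, hidden size 1536, and grouped-query attention with 12 query heads sharing 2 key/value heads. Weights are serialized in float32, which is why the index's `total_size` of ~7.1 GB matches roughly 1.78 B parameters at 4 bytes each. A minimal loading sketch (path illustrative):

```python
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "deepscaler-1.5b-8k/actor/global_step_2040",  # hypothetical local checkout
    torch_dtype=torch.bfloat16,  # downcast on load; omit to keep the stored float32
)
print(model.config.num_hidden_layers, model.config.num_key_value_heads)  # 28 2
```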
diff --git a/deepscaler-1.5b-8k/actor/global_step_2040/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2040/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2040/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
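
`generation_config.json` carries the checkpoint's decoding defaults (nucleus sampling with temperature 0.6 and top_p 0.95), which `model.generate()` picks up automatically when no overrides are passed. A minimal end-to-end sketch (path illustrative):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_2040"  # hypothetical local checkout
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16)

inputs = tok.apply_chat_template(
    [{"role": "user", "content": "What is 7 * 8?"}],
    add_generation_prompt=True,
    return_tensors="pt",
)
out = model.generate(inputs, max_new_tokens=128)  # sampling defaults from generation_config.json
print(tok.decode(out[0, inputs.shape[1]:], skip_special_tokens=True))
```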
diff --git a/deepscaler-1.5b-8k/actor/global_step_2040/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2040/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2fc15afc9d7a415b67f293c4063ed1f78a64b2c2
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2040/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:980716e821bb064c1aa1e4bb2d3cd0baf183117304f60dabd666c2c508b8ecac
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2040/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2040/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bc8a63014e62e884b72f876cd0ecce0575bdc5ef
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2040/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cdb86690067bc917d87ce4e888b440ce54c3271d20a9170cd25191b5389e2006
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2040/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2040/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2040/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2040/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2040/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2040/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2040/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2040/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2040/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2040/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2040/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2040/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2060/config.json b/deepscaler-1.5b-8k/actor/global_step_2060/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2060/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2060/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2060/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2060/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2060/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2060/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..29f8a447d26586cd1f065ab8b2b96f92dece18df
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2060/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc389807ff7794bbd82d6a601c2e9df26c9c30b857c9d3e9e66b524d3ab46e97
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2060/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2060/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7ce2ac015107e2240cb27ba37acc311d5ce2b380
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2060/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca8ee105e9d1fdc7e7a6faecb351eb9567a8b1eff821f0ea1498fabd02a39d9a
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2060/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2060/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2060/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
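
The index file above is how loaders resolve a sharded checkpoint: `metadata.total_size` is the combined tensor size in bytes, and `weight_map` maps each parameter name to the shard that stores it (layers 0–20 plus part of layer 21 in shard 1, the rest in shard 2). A minimal lookup sketch, assuming the file sits in the current directory:

```python
import json

# Resolve which shard holds a given tensor, exactly as the weight_map is used.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])  # 7108352000 bytes across both shards
print(index["weight_map"]["model.layers.27.mlp.down_proj.weight"])
# -> model-00002-of-00002.safetensors
```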
diff --git a/deepscaler-1.5b-8k/actor/global_step_2060/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2060/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2060/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
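
As the map above shows, this checkpoint has no dedicated padding token: `<|end▁of▁sentence|>` doubles as both `eos_token` and `pad_token` (id 151643, per the tokenizer_config that follows), which is common for decoder-only models. A quick check, assuming the checkpoint directory is available locally:

```python
from transformers import AutoTokenizer

# eos and pad resolve to the same token string and the same id (151643).
tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2060")
assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
assert tok.pad_token_id == tok.eos_token_id == 151643
```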
diff --git a/deepscaler-1.5b-8k/actor/global_step_2060/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2060/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2060/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2060/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2060/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2060/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
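
The `chat_template` above is the DeepSeek-R1 conversational format: the rendered prompt starts with the BOS token and any system prompt, wraps turns in `<|User|>`/`<|Assistant|>`, strips everything up to `</think>` from prior assistant turns, and, with a generation prompt, ends by opening `<|Assistant|><think>\n` so the model begins with its reasoning. A rendering sketch, assuming the checkpoint directory is local:

```python
from transformers import AutoTokenizer

# Render the chat template above without tokenizing, to inspect the prompt.
tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2060")
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "What is 7 * 8?"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>\n
```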
diff --git a/deepscaler-1.5b-8k/actor/global_step_2080/config.json b/deepscaler-1.5b-8k/actor/global_step_2080/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2080/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
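
This config is the stock DeepSeek-R1-Distill-Qwen-1.5B architecture (28 Qwen2 layers, hidden size 1536, grouped-query attention with 12 query and 2 key-value heads), saved in float32. The dimensions reproduce the shard index's `total_size` exactly: 1,777,088,000 parameters × 4 bytes = 7,108,352,000. A sketch of the arithmetic from the fields above:

```python
# Recompute the parameter count from config.json; x4 bytes (float32) should
# equal the 7,108,352,000-byte total_size in model.safetensors.index.json.
hidden, inter, layers, vocab = 1536, 8960, 28, 151936
kv_dim = 2 * (hidden // 12)           # num_key_value_heads * head_dim = 256

attn = 2 * hidden * hidden            # q_proj and o_proj weights
attn += 2 * hidden * kv_dim           # k_proj and v_proj weights
attn += hidden + 2 * kv_dim           # q/k/v biases (o_proj has none)
mlp = 3 * hidden * inter              # gate_proj, up_proj, down_proj
norms = 2 * hidden                    # input + post-attention layernorm

params = layers * (attn + mlp + norms)
params += 2 * vocab * hidden          # embed_tokens + untied lm_head
params += hidden                      # final model.norm
print(params, params * 4)             # 1777088000 7108352000
```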
diff --git a/deepscaler-1.5b-8k/actor/global_step_2080/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2080/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2080/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
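
These are the recommended R1 sampling settings (sampling enabled, temperature 0.6, top-p 0.95), and `generate()` picks them up automatically from `generation_config.json`. A minimal sketch, assuming the step-2080 directory is local:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_2080"
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path)  # loads generation_config.json

inputs = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 12 + 35."}],
    add_generation_prompt=True,
    return_tensors="pt",
)
out = model.generate(inputs, max_new_tokens=256)  # do_sample/temperature/top_p from config
print(tok.decode(out[0], skip_special_tokens=True))
```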
diff --git a/deepscaler-1.5b-8k/actor/global_step_2080/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2080/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..577bb058ce8e36ee25255418e0792d377079c04b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2080/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:196fb3359febfa60c6da25581ff9a66fb0a28fb5accc4a36d3dc4023e59d69fd
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2080/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2080/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e8ad1353ead29ab5795dc0d562db744d68a5d4fc
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2080/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:befc29d37aa6d7eb964d8f299fbea64a9c90f50ab6395891a288ecc1c599d02c
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2080/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2080/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2080/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2080/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2080/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2080/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2080/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2080/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2080/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2080/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2080/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2080/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2100/config.json b/deepscaler-1.5b-8k/actor/global_step_2100/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2100/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2100/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2100/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2100/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2100/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2100/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..20c9fd889ac069b3fd895c3cbabd8061699cefff
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2100/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95f02fe1d8a3f0aa874961d880bd85312e7dd2fb4ca63d8c12b8a49334d7ede2
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2100/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2100/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..89cb846c55468108dc65e2778c122904449570be
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2100/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b06adb4bc8c95a346aeff4637585a468a26f47a90df6e02c65c2e45ba8ea8c5
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2100/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2100/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2100/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2100/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2100/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2100/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2100/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2100/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2100/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2100/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2100/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2100/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2120/config.json b/deepscaler-1.5b-8k/actor/global_step_2120/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2120/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2120/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2120/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2120/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2120/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2120/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..969b27b7b3a068d665d982c6a6e80606dba18a16
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2120/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eabc028926e00986a97c2c0c1f90310b477feac2ea46f751d213268d5894f1a7
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2120/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2120/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..cf1bbc5ff010a5fc631b2ff3fe6cd806a5a59b86
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2120/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67188311a128c12b13fcff7c022d35edd9bb0fdcdc40fcd1a916c8ce085af32e
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2120/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2120/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2120/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
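
The weight_map above is what lets loaders open only the shard that holds a given tensor; note that layer 21 straddles the shard boundary (its attention weights and gate/up projections remain in shard 1, while its layernorms and down projection spill into shard 2). A minimal shard-resolution sketch, assuming the `safetensors` package and a locally fetched checkpoint:

import json
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_2120"  # local path, an assumption
with open(f"{ckpt}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.21.mlp.down_proj.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00002.safetensors"
with safe_open(f"{ckpt}/{shard}", framework="pt", device="cpu") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape))  # (1536, 8960) given the config below
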
diff --git a/deepscaler-1.5b-8k/actor/global_step_2120/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2120/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2120/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2120/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2120/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2120/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2120/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2120/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2120/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+      "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+      "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+      "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+      "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
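
The chat_template above drives `tokenizer.apply_chat_template`: user turns are wrapped in <|User|>, assistant turns in <|Assistant|> ... <|end▁of▁sentence|>, text up to </think> is stripped from prior assistant turns, and the generation prompt opens a fresh <think> block. A hedged usage sketch (local path illustrative; assumes the transformers library is installed):

from transformers import AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_2120"  # local path, an assumption
tok = AutoTokenizer.from_pretrained(ckpt)
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 12 * 34."}],
    tokenize=False,
    add_generation_prompt=True,
)
# Expected render, given the template above:
# "<|begin▁of▁sentence|><|User|>Compute 12 * 34.<|Assistant|><think>\n"
print(prompt)
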
diff --git a/deepscaler-1.5b-8k/actor/global_step_2140/config.json b/deepscaler-1.5b-8k/actor/global_step_2140/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2140/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
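
These hyperparameters pin down the checkpoint size exactly: at float32 (4 bytes per parameter, per torch_dtype above), the 7,108,352,000-byte total_size in the index corresponds to 1,777,088,000 parameters. A sketch of the arithmetic, using only values copied from this config.json:

# Qwen2ForCausalLM parameter count implied by the config above.
hidden, inter, layers, vocab = 1536, 8960, 28, 151936
heads, kv_heads = 12, 2
kv_dim = hidden // heads * kv_heads            # 2 KV heads of dim 128 -> 256

attn = hidden * hidden + hidden                # q_proj weight + bias
attn += 2 * (kv_dim * hidden + kv_dim)         # k_proj, v_proj weights + biases
attn += hidden * hidden                        # o_proj (no bias in Qwen2)
mlp = 3 * hidden * inter                       # gate_proj, up_proj, down_proj
norms = 2 * hidden                             # input + post-attention RMSNorm

total = layers * (attn + mlp + norms)
total += 2 * vocab * hidden + hidden           # embed_tokens + untied lm_head + final norm
assert total == 1_777_088_000                  # ~1.78 B parameters
assert total * 4 == 7_108_352_000              # float32 bytes == index "total_size"
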
diff --git a/deepscaler-1.5b-8k/actor/global_step_2140/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2140/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2140/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
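
These are the sampling defaults that `model.generate` falls back on when no overrides are passed (sampling on, temperature 0.6, nucleus top_p 0.95). A usage sketch with the defaults spelled out explicitly (local path illustrative; assumes transformers and torch are installed):

from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_2140"  # local path, an assumption
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype="auto")

input_ids = tok.apply_chat_template(
    [{"role": "user", "content": "What is 7 * 8?"}],
    add_generation_prompt=True,
    return_tensors="pt",
)
out = model.generate(input_ids, do_sample=True, temperature=0.6, top_p=0.95,
                     max_new_tokens=128)
print(tok.decode(out[0, input_ids.shape[1]:], skip_special_tokens=True))
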
diff --git a/deepscaler-1.5b-8k/actor/global_step_2140/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2140/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ecb3ad57a64236d6c97918b1ecd9986c523346fa
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2140/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d527c22047c6dab8dc7f00225478087c0dea2b20785759c1b10969a96b4b5d08
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2140/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2140/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2e7e6a9d3d50ce30cd6d28ae578a01d6df80bc8b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2140/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7b74d1bc47cefb616719a44f7d5f9a21d80a6be2a070270fb114a7000d65f8d
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2140/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2140/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2140/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
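
Each model.safetensors.index.json, like the one that closes above, has two parts: metadata.total_size, the combined byte size of all shards, and weight_map, which sends every parameter name to the shard file that stores it. A minimal sketch of resolving one tensor through the map, assuming the checkpoint directory has been fetched locally (the directory path below is a placeholder):

```python
import json
from safetensors import safe_open  # pip install safetensors

CKPT_DIR = "deepscaler-1.5b-8k/actor/global_step_2140"  # placeholder local path

with open(f"{CKPT_DIR}/model.safetensors.index.json") as f:
    index = json.load(f)

# weight_map: parameter name -> shard file, e.g.
#   "model.norm.weight" -> "model-00002-of-00002.safetensors"
shard_file = index["weight_map"]["model.norm.weight"]

# Open only the shard that holds the tensor and read it lazily.
with safe_open(f"{CKPT_DIR}/{shard_file}", framework="pt") as shard:
    tensor = shard.get_tensor("model.norm.weight")

print(shard_file, tuple(tensor.shape))  # model-00002-of-00002.safetensors (1536,)
```
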
diff --git a/deepscaler-1.5b-8k/actor/global_step_2140/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2140/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2140/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2140/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2140/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2140/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
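
The tokenizer.json blobs are stored through Git LFS, so the diff records only a three-line pointer: the pointer-spec version, the SHA-256 (oid) of the actual payload, and its size in bytes. A small sketch that verifies a fetched file against the pointer above (the local filename is an assumption):

```python
import hashlib

# Values copied from the LFS pointer above.
EXPECTED_OID = "e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893"
EXPECTED_SIZE = 11422778

def verify(path: str) -> bool:
    """Return True if the file matches the pointer's oid and size."""
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE

print(verify("tokenizer.json"))  # True once `git lfs pull` has fetched the blob
```
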
diff --git a/deepscaler-1.5b-8k/actor/global_step_2140/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2140/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2140/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
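
The chat_template field above is a Jinja template that transformers renders through apply_chat_template; with add_generation_prompt=True it closes the prompt with the <|Assistant|><think>\n cue so the model begins inside its reasoning block, and it strips everything up to </think> from prior assistant turns. A short usage sketch, assuming the checkpoint directory (placeholder path) has its LFS blobs pulled:

```python
from transformers import AutoTokenizer

# Placeholder local path to one of the checkpoint directories above.
tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2140")

messages = [{"role": "user", "content": "What is 7 * 8?"}]

# Render the Jinja chat template to a plain prompt string.
prompt = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
# Roughly: <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>\n
```
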
diff --git a/deepscaler-1.5b-8k/actor/global_step_2160/config.json b/deepscaler-1.5b-8k/actor/global_step_2160/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2160/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
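
The config above pins down the parameter count exactly, and it reconciles with the metadata.total_size recorded in each model.safetensors.index.json: 1,777,088,000 float32 parameters at 4 bytes each is 7,108,352,000 bytes. A sketch of the arithmetic (head_dim = hidden_size / num_attention_heads = 128 is inferred, since the config does not state it):

```python
# Values from config.json above.
hidden, inter, layers = 1536, 8960, 28
vocab, heads, kv_heads = 151936, 12, 2
head_dim = hidden // heads  # 128

kv = kv_heads * head_dim                # 256-dim K/V (grouped-query attention)
attn = hidden * hidden + hidden         # q_proj weight + bias
attn += 2 * (hidden * kv + kv)          # k_proj and v_proj weights + biases
attn += hidden * hidden                 # o_proj (no bias)
mlp = 3 * hidden * inter                # gate_proj, up_proj, down_proj
norms = 2 * hidden                      # input + post-attention RMSNorm

per_layer = attn + mlp + norms
total = layers * per_layer + 2 * vocab * hidden + hidden
#       ^ transformer stack  ^ embed + untied lm_head  ^ final norm

assert total * 4 == 7_108_352_000  # matches metadata.total_size (float32)
print(total)  # 1777088000
```
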
diff --git a/deepscaler-1.5b-8k/actor/global_step_2160/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2160/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2160/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
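
generation_config.json fixes the default decoding for these checkpoints: sampling enabled at temperature 0.6 and top_p 0.95. transformers picks these values up automatically when the file sits next to the weights, so a plain generate() call samples with them; a minimal sketch (placeholder path, float32 to match the checkpoint dtype):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_2160"  # placeholder local path
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.float32)

prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 12 + 30."}],
    tokenize=False, add_generation_prompt=True,
)
inputs = tok(prompt, return_tensors="pt")

# do_sample / temperature / top_p default to generation_config.json values.
out = model.generate(**inputs, max_new_tokens=256)
print(tok.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```
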
diff --git a/deepscaler-1.5b-8k/actor/global_step_2160/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2160/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4a3fec4609d2ee711a2b0c649e4666cdc3e4bf52
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2160/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:399ed6d911c386d3a6a786677fdf7f1b33139b3581ceb07ff928c17a4df2f955
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2160/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2160/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bed62d6dd416ba1a64964ff7fc2ff58230957f65
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2160/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5f8bf260a5c6bf078c047072dc0cdbf0473ba638b8f45ffd50c45b879b7eff8
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2160/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2160/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2160/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2160/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2160/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2160/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2160/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2160/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2160/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2160/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2160/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2160/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2180/config.json b/deepscaler-1.5b-8k/actor/global_step_2180/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2180/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2180/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2180/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2180/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2180/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2180/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ce1ba823204a292db6242ded9553eba6506eb9ec
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2180/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c105e29420af3f4deee1d1dbdc6c5819065bf1454071c8e1ef56b2c7e6a1ced3
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2180/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2180/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6f6bdbcdf562593a33e796495bf9fd3a799dea41
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2180/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c90c8727ee9705eaec5a3958c57244ecbe8708d3438527811c28e46727af58b4
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2180/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2180/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2180/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
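The index that ends above is the standard sharded-safetensors manifest: `weight_map` assigns every tensor name to one of the two shard files, and a loader simply reads each shard and merges the results. A minimal sketch of those mechanics (Hugging Face's `from_pretrained` does all of this automatically; the path matches the `global_step_2180` directory this diff adds):

```python
# Reassemble a sharded checkpoint from model.safetensors.index.json.
import json
from pathlib import Path

from safetensors.torch import load_file

ckpt_dir = Path("deepscaler-1.5b-8k/actor/global_step_2180")
index = json.loads((ckpt_dir / "model.safetensors.index.json").read_text())

state_dict = {}
for shard in sorted(set(index["weight_map"].values())):
    # e.g. model-00001-of-00002.safetensors, model-00002-of-00002.safetensors
    state_dict.update(load_file(ckpt_dir / shard))

# Every tensor named in the index should now be present.
assert set(state_dict) == set(index["weight_map"])
```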
diff --git a/deepscaler-1.5b-8k/actor/global_step_2180/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2180/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2180/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
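One detail worth flagging in the map above: `pad_token` is the same token as `eos_token`, a common arrangement for decoder-only checkpoints pretrained without a dedicated pad token. An illustrative check (standard `transformers` usage; the id 151643 comes from this repo's `config.json`):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2180")
# Padding reuses the EOS id, so attention_mask (not the token id itself)
# must distinguish a real end-of-sentence from padding in batched training.
assert tok.pad_token_id == tok.eos_token_id == 151643
```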
diff --git a/deepscaler-1.5b-8k/actor/global_step_2180/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2180/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2180/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
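As with the other large files in this diff, `tokenizer.json` is committed as a Git LFS pointer: three lines giving the spec version, the SHA-256 of the real blob, and its size (about 11 MB here). `git lfs pull` replaces the stub with the actual file. A toy parser, for illustration only:

```python
def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; keep the oid and size fields.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"oid": fields["oid"], "size": int(fields["size"])}

ptr = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893\n"
    "size 11422778\n"
)
assert ptr["size"] == 11422778  # ~11 MB once fetched
```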
diff --git a/deepscaler-1.5b-8k/actor/global_step_2180/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2180/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2180/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
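The `chat_template` above is DeepSeek-R1's Jinja template: it prefixes the system prompt, wraps turns in `<|User|>`/`<|Assistant|>`, strips everything up to `</think>` from prior assistant turns, and, when a generation prompt is requested, ends on `<|Assistant|><think>\n` so the model begins inside a reasoning block. Typical usage, as a sketch with the standard `transformers` API:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2180")
messages = [{"role": "user", "content": "What is 2 + 2?"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# prompt ends with "<|User|>What is 2 + 2?<|Assistant|><think>\n"
print(prompt)
```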
diff --git a/deepscaler-1.5b-8k/actor/global_step_220/config.json b/deepscaler-1.5b-8k/actor/global_step_220/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_220/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
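This config describes the DeepSeek-R1-Distill-Qwen-1.5B architecture: 28 Qwen2 layers of width 1536, grouped-query attention with 12 query heads over 2 key/value heads (head_dim 128), and untied 151936-token embeddings, stored in float32. A back-of-the-envelope parameter count from these numbers (Qwen2 puts biases on the q/k/v projections only):

```python
h, inter, layers, vocab = 1536, 8960, 28, 151936
heads, kv_heads = 12, 2
head_dim = h // heads            # 128
kv = kv_heads * head_dim         # 256

attn = (h * h + h) + 2 * (h * kv + kv) + h * h   # q, k, v (with bias), o
mlp = 3 * h * inter                              # gate, up, down projections
per_layer = attn + mlp + 2 * h                   # plus the two RMSNorms

total = 2 * vocab * h + layers * per_layer + h   # embeddings, lm_head, final norm
print(total)      # 1_777_088_000 parameters
print(total * 4)  # 7_108_352_000 bytes in float32
```

The float32 byte count lines up exactly with `metadata.total_size` in the `model.safetensors.index.json` added below.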
diff --git a/deepscaler-1.5b-8k/actor/global_step_220/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_220/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_220/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
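These are the sampling defaults the checkpoint ships with (sampling on, temperature 0.6, nucleus top-p 0.95), so a plain `generate` call picks them up from `generation_config.json` with no extra arguments. A sketch of the equivalent explicit call (standard `transformers` API; the prompt is arbitrary):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_220"
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path)

inputs = tok("What is 2 + 2?", return_tensors="pt")
# These kwargs are redundant with generation_config.json; shown for clarity.
out = model.generate(**inputs, do_sample=True, temperature=0.6, top_p=0.95,
                     max_new_tokens=64)
print(tok.decode(out[0], skip_special_tokens=True))
```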
diff --git a/deepscaler-1.5b-8k/actor/global_step_220/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_220/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c6118ef13a2c80424c80c45d83450cfcdfc875cd
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_220/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d70b6bbd52d0703f26ad814473cbc34ba2d5d1c9589ca0a86cb5866c044c54d
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_220/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_220/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dad77a872d2e8f2fe675ac7d91b987de30159f30
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_220/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b14fa4a331cfb71b6032f7ead795f6c823d83b97fd638e3930e6708173d2686
+size 2111719976
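Together the two shards hold the full float32 model. Their byte sizes slightly exceed the `metadata.total_size` recorded in the index that follows, because each safetensors file carries a small JSON header in addition to the raw tensor bytes:

```python
shard_bytes = 4_996_670_464 + 2_111_719_976  # sizes from the two LFS pointers
tensor_bytes = 7_108_352_000                 # metadata.total_size in the index
print(shard_bytes - tensor_bytes)            # 38_440 bytes of header overhead
```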
diff --git a/deepscaler-1.5b-8k/actor/global_step_220/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_220/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_220/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_220/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_220/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_220/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_220/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_220/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_220/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_220/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_220/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_220/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2200/config.json b/deepscaler-1.5b-8k/actor/global_step_2200/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2200/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2200/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2200/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2200/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2200/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2200/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4fdb486fbb6a059e0911c1e09bdb72eba30cb507
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2200/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee24b0c7afab4f17c806978c533b9dc1275e79bd53a7f8b0a567973bad55b2b1
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2200/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2200/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..98ccbcd94b46499881b6331cadd1c1df3453ccaf
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2200/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3670e2fed5a46b9243895e5ef27cac2144d7a5c3a19c0d5df4ee66181ba1f3f
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2200/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2200/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2200/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2200/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2200/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2200/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2200/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2200/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2200/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2200/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2200/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2200/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2220/config.json b/deepscaler-1.5b-8k/actor/global_step_2220/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2220/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2220/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2220/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2220/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2220/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2220/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..022521a406b735071d1bc7b415862b49d101b84b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2220/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ba30a93725a4f4ade3bbf52bbeb5567df5b3eb5adc91e5d33dfbdc9dc8121e4
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2220/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2220/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a47248443ce06c602b8c85c7765a75a1dacbaa9d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2220/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4d95733760331617ac28a3cdbe8c3da08ce1e50ad18d14da558ff83e7d89078
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2220/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2220/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2220/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2220/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2220/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2220/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2220/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2220/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2220/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2220/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2220/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2220/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2240/config.json b/deepscaler-1.5b-8k/actor/global_step_2240/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2240/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2240/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2240/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2240/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2240/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2240/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d7c6ddff755da0ff1fdb6f6be1c61e58c861b846
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2240/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be0a056b514b8a9be22c214ab66cf3bcfaa3904a102417f1fd3b4ba4ead6277b
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2240/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2240/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1f65fa3fd12a66f6615af83aa2483ce809ef8161
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2240/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1bec033d1eaa33c7f311d21c69418cf65ef9bf26f645ddaad8693d003d0d9a5b
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2240/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2240/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2240/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
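
The `weight_map` above is the contract for sharded loading: every tensor name points at the shard file that physically stores it, and `metadata.total_size` records the combined tensor payload in bytes. A minimal sketch of resolving one tensor by hand (the local checkout path is an assumption; `transformers` performs the same resolution internally):

```python
# Minimal sketch: resolve a tensor through model.safetensors.index.json.
# Assumes the checkpoint directory has been checked out locally with LFS blobs pulled.
import json
from safetensors import safe_open

ckpt_dir = "deepscaler-1.5b-8k/actor/global_step_2240"  # hypothetical local path

with open(f"{ckpt_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

# Look up which shard holds the final norm, then open only that shard.
shard = index["weight_map"]["model.norm.weight"]  # -> "model-00002-of-00002.safetensors"
with safe_open(f"{ckpt_dir}/{shard}", framework="pt") as f:
    tensor = f.get_tensor("model.norm.weight")

print(tensor.shape)  # expected torch.Size([1536]), matching hidden_size in config.json
```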
diff --git a/deepscaler-1.5b-8k/actor/global_step_2240/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2240/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2240/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2240/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2240/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2240/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
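
Files matched by the `.gitattributes` LFS rules are committed as pointer files like the three lines above: a spec version, the sha256 of the real blob, and its byte size. A small sketch of parsing one (plain Python, no LFS tooling assumed; the actual blob is fetched by `git lfs pull`):

```python
# Minimal sketch: parse a Git LFS pointer file into its fields.
def parse_lfs_pointer(text: str) -> dict:
    # Each line is "<key> <value>"; split once so the value may contain spaces.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
size 11422778"""

print(parse_lfs_pointer(pointer))  # ~11.4 MB tokenizer.json stored out-of-band in LFS
```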
diff --git a/deepscaler-1.5b-8k/actor/global_step_2240/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2240/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2240/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
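
`tokenizer_config.json` carries the chat template that turns a message list into the model's prompt format. A minimal sketch of rendering it, assuming the checkpoint directory above has been pulled locally (including the LFS blob for tokenizer.json):

```python
# Minimal sketch: render the checkpoint's chat template via transformers.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2240")

messages = [
    {"role": "system", "content": "You are a helpful math assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]

# add_generation_prompt=True appends '<|Assistant|><think>\n' so the model
# begins its reasoning block immediately.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```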
diff --git a/deepscaler-1.5b-8k/actor/global_step_2260/config.json b/deepscaler-1.5b-8k/actor/global_step_2260/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2260/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
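
`config.json` declares `"torch_dtype": "float32"`, which is why this 1.5B-class checkpoint occupies roughly 7.1 GB: dividing the index's `total_size` by 4 bytes per parameter gives about 1.78B parameters (the untied `lm_head` on a 151,936-entry vocabulary pushes the count above the nominal 1.5B). As a quick check:

```python
# Quick arithmetic check, values copied from the files above.
total_size_bytes = 7_108_352_000  # metadata.total_size in model.safetensors.index.json
bytes_per_param = 4               # torch_dtype = "float32"

print(total_size_bytes // bytes_per_param)  # 1_777_088_000 parameters, ~1.78B
```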
diff --git a/deepscaler-1.5b-8k/actor/global_step_2260/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2260/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2260/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
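
`generation_config.json` sets the sampling defaults the checkpoint ships with; `generate()` picks them up automatically unless overridden at the call site. A minimal end-to-end sketch (local path assumed; `bfloat16` here is an illustrative casting choice, not part of the checkpoint, which stores float32):

```python
# Minimal sketch: the stored defaults (do_sample=True, temperature=0.6,
# top_p=0.95) apply without being passed to generate().
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_2260"  # hypothetical local path
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)

inputs = tok.apply_chat_template(
    [{"role": "user", "content": "Prove that sqrt(2) is irrational."}],
    add_generation_prompt=True,
    return_tensors="pt",
)
out = model.generate(inputs, max_new_tokens=512)  # samples at T=0.6, top_p=0.95
print(tok.decode(out[0], skip_special_tokens=True))
```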
diff --git a/deepscaler-1.5b-8k/actor/global_step_2260/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2260/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dd030c9d3232a5611c4104664370c0d9b643fd0f
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2260/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4c629a13e9a3974c8e355cadbb782656118ba281a9857ce128ec38f1628e601
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2260/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2260/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..41b8a29dbc18942c636872e3c363fdf7cf036d8f
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2260/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c453d67b995ea405206542485c3f1f155997d6a06a2f829357eb0a8e89973747
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2260/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2260/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2260/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
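
As a sanity check on the two shards above: their LFS file sizes sum to slightly more than the index's `total_size`, and the difference is the per-shard safetensors header, not missing tensor data.

```python
# Values copied from the LFS pointers and the index metadata above.
shard_files = 4_996_670_464 + 2_111_719_976  # model-00001 + model-00002 file sizes
tensor_data = 7_108_352_000                  # metadata.total_size in the index

print(shard_files - tensor_data)  # 38_440 bytes of safetensors headers across both shards
```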
diff --git a/deepscaler-1.5b-8k/actor/global_step_2260/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2260/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2260/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2260/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2260/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2260/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2260/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2260/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2260/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2280/config.json b/deepscaler-1.5b-8k/actor/global_step_2280/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2280/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2280/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2280/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2280/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2280/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2280/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c8bbdacd8b50c623a6d07a270a1873f9449113b2
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2280/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54492960868892a39f4857145802458ffd27220e7d4ccc8510a584f431faa87a
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2280/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2280/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4964669a6d6459b2b394237e8bf6f43896f9d83d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2280/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9cf864f81a05019f54414b9da5a2f4c08ef6b3589e4abbdb9d7c815361b27c9
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2280/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2280/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2280/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2280/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2280/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2280/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2280/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2280/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2280/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2280/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2280/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2280/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2300/config.json b/deepscaler-1.5b-8k/actor/global_step_2300/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2300/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2300/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2300/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2300/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2300/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2300/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5f8066da13e63d7de261af4a17a86ca388b83966
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2300/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0f64abd16987d009ce8809c9a7cc14ea5c2614df0170a2a5762f9d2280f0d79
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2300/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2300/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d3f88de1f2013e1c9fd86747f946d8a76adfb7af
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2300/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a01b6148c9d7083aa5020c6a213fac49becbdd8ffec4ce0a2c22ce5d6143a2a5
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2300/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2300/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2300/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2300/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2300/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2300/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2300/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2300/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2300/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2300/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2300/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2300/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2320/config.json b/deepscaler-1.5b-8k/actor/global_step_2320/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2320/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2320/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2320/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2320/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2320/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2320/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f0b38f1aa861623817e1a3fb9ee4e040c2d7ae0a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2320/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f46c0ee4dc9f2a8c4f863d55b5cd289c9263cd077affb892b180832070984585
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2320/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2320/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d62560fefecaab970bd160784ce343d274ca6697
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2320/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cecfe2390273e08a3e460f310d5378c11e7161d499c5f6caecbd63e8e62df6f4
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2320/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2320/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2320/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2320/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2320/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2320/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2320/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2320/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2320/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2320/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2320/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2320/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2340/config.json b/deepscaler-1.5b-8k/actor/global_step_2340/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2340/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2340/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2340/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2340/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2340/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2340/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bbb4427d9e41efa9a504144f53d8be38cb174f6d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2340/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7a9b91ae2f4e3f340a83ae8666dce7099465c31955f9a81dba24a30a208910d
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2340/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2340/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..954e1ee30fe8e8cceac5d94273f5e4605b98270d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2340/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a30a978e8e65654f3503a0f8208a880319409a33f24c055838db95f21a30ab8
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2340/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2340/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2340/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2340/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2340/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2340/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2340/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2340/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2340/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2340/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2340/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2340/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2360/config.json b/deepscaler-1.5b-8k/actor/global_step_2360/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2360/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2360/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2360/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2360/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2360/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2360/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ca7a9c0d2189639cefb36704234241d845c8215e
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2360/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:024ea7054c3ddc0b520e7e42fd47c4cc53ef930c417edde1fdfa8d57d17ce484
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2360/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2360/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4addd2794d58027d881e43d98d16dfb3c76f2c64
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2360/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fbdaae352106da0647488487cac5e947b4cb3329033bb2d45884e86d3ea15e2
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2360/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2360/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2360/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
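
The model.safetensors.index.json above maps every tensor name to the shard file that stores it; loaders consult the index first and then open only the shard they need. A minimal lookup sketch in Python, assuming the global_step_2360 directory from this diff has been fetched with its LFS objects (the tensor name is taken from the weight_map above):

import json
from safetensors import safe_open

ckpt_dir = "deepscaler-1.5b-8k/actor/global_step_2360"

# The index maps tensor names to shard filenames.
with open(f"{ckpt_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.norm.weight"
shard = index["weight_map"][name]   # "model-00002-of-00002.safetensors"

# Open only the shard that holds the tensor and read it lazily.
with safe_open(f"{ckpt_dir}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape))
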
diff --git a/deepscaler-1.5b-8k/actor/global_step_2360/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2360/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2360/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2360/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2360/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2360/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
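
What Git tracks for tokenizer.json is not the ~11 MB file itself but the three-line Git LFS pointer above: the spec version, a sha256 object id, and the byte size. A small sketch of parsing such a pointer (plain "key value" lines, exactly as in the hunk above):

def parse_lfs_pointer(text):
    # Each pointer line is "key value"; the oid is "algorithm:hex-digest".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"version": fields["version"], "algo": algo,
            "digest": digest, "size": int(fields["size"])}

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893\n"
    "size 11422778\n"
)
info = parse_lfs_pointer(pointer)
assert info["algo"] == "sha256" and info["size"] == 11422778
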
diff --git a/deepscaler-1.5b-8k/actor/global_step_2360/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2360/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2360/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+      "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+      "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+      "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+      "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
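
The chat_template field above is a Jinja template that renders a message list into DeepSeek's <|User|>/<|Assistant|> turn format, strips earlier <think>…</think> reasoning from assistant history (the content.split('</think>') step), and, when add_generation_prompt is set, ends the prompt with the assistant tag so decoding continues in the assistant role. A minimal usage sketch with transformers, assuming the checkpoint directory is available locally:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2360")

messages = [
    {"role": "system", "content": "You are a concise math assistant."},
    {"role": "user", "content": "What is 17 * 24?"},
]
# tokenize=False returns the rendered string, which makes the template easy to inspect.
prompt = tok.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)
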
diff --git a/deepscaler-1.5b-8k/actor/global_step_2380/config.json b/deepscaler-1.5b-8k/actor/global_step_2380/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2380/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
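
The numbers in this config pin down the checkpoint size exactly. With untied embeddings, grouped-query attention (12 query heads, 2 KV heads, head dim 1536/12 = 128), biases on q/k/v only, and float32 storage, the parameter count multiplies out to the total_size recorded in the shard index: 1,777,088,000 parameters × 4 bytes = 7,108,352,000 bytes. A quick check using only values from the config above:

h, inter, layers, vocab = 1536, 8960, 28, 151936
heads, kv_heads = 12, 2
head_dim = h // heads              # 128
kv_dim = kv_heads * head_dim       # 256

attn = (h * h + h) + 2 * (kv_dim * h + kv_dim) + h * h  # q (w+b), k and v (w+b), o (w)
mlp = 3 * h * inter                # gate_proj, up_proj, down_proj
norms = 2 * h                      # input + post-attention RMSNorm weights
per_layer = attn + mlp + norms

# untied embed_tokens + lm_head, 28 decoder layers, final RMSNorm
params = 2 * vocab * h + layers * per_layer + h
assert params == 1_777_088_000
assert params * 4 == 7_108_352_000  # float32 bytes == "total_size" in the index
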
diff --git a/deepscaler-1.5b-8k/actor/global_step_2380/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2380/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2380/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
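
The generation config records the sampling recipe for these checkpoints: do_sample=True at temperature 0.6 and top_p 0.95, with BOS/EOS remapped to the DeepSeek special tokens (151646/151643). transformers picks these defaults up automatically when the model is loaded; a minimal sketch, assuming the LFS shards for this step are present locally:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_2380"  # any saved step works the same way
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float32)

inputs = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 3^5."}],
    add_generation_prompt=True, return_tensors="pt",
)
# generation_config.json supplies do_sample=True, temperature=0.6, top_p=0.95.
out = model.generate(inputs, max_new_tokens=256)
print(tok.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
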
diff --git a/deepscaler-1.5b-8k/actor/global_step_2380/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2380/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e6137bd68b50014cb2a04d75c6ccc90650fdaa49
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2380/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3963cce4a0b7c255e0a5b3f9c1be38c900b0cec6dbff3bc95a6b4b061860b456
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2380/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2380/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4c42ff78d0ded678e5f3e45798bda5e247814f29
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2380/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82f099a39d3abc4fb0e8060ebfd8cafbee35595035361353024b50c82688b21c
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2380/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2380/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2380/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2380/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2380/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2380/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2380/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2380/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2380/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2380/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2380/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2380/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+      "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+      "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+      "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+      "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_240/config.json b/deepscaler-1.5b-8k/actor/global_step_240/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_240/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_240/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_240/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_240/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_240/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_240/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..238140c5505deef3fa835011863e386b1a627158
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_240/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da8346d3523802cf3e0f26f224665c9bdad9643823f44e724f929f464e43ac18
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_240/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_240/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..acccfa4c2c8d9c98b115c66e3f7c3fb9420513ae
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_240/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25fa8c9a818f3e545bbbf606523e5ec9efa77c38434c83e2bb15bbecbf785088
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_240/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_240/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_240/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
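
The index that closes above pairs every tensor name with the shard file that stores it. A minimal sketch (not part of the commit) of how such a weight_map is consumed, assuming a local checkout of the global_step_240 directory with the LFS shards pulled; the path and the probed tensor name are illustrative:

import json
from safetensors import safe_open

ckpt_dir = "deepscaler-1.5b-8k/actor/global_step_240"  # assumed local path

# weight_map: tensor name -> shard filename, exactly as in the index above
with open(f"{ckpt_dir}/model.safetensors.index.json") as fh:
    index = json.load(fh)

name = "model.layers.21.input_layernorm.weight"
shard = index["weight_map"][name]  # "model-00002-of-00002.safetensors"

# Open only the shard that holds the tensor; the other shard is never read.
with safe_open(f"{ckpt_dir}/{shard}", framework="pt") as fh:
    tensor = fh.get_tensor(name)
print(name, tuple(tensor.shape))  # expected (1536,) given hidden_size=1536 in config.json
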
diff --git a/deepscaler-1.5b-8k/actor/global_step_240/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_240/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_240/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_240/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_240/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_240/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_240/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_240/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_240/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
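
The chat_template above (with the stripped <think> markers restored) is what transformers uses to render a conversation into the <|User|>/<|Assistant|> format the model expects. A minimal sketch, assuming the same local checkout as before; the message content is illustrative:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_240")  # assumed local path

# The template prepends bos_token, wraps the turn in <|User|>, and
# add_generation_prompt=True appends "<|Assistant|><think>\n" per the template above.
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "What is 7 * 8?"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
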
diff --git a/deepscaler-1.5b-8k/actor/global_step_2400/config.json b/deepscaler-1.5b-8k/actor/global_step_2400/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2400/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2400/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2400/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2400/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
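
generation_config.json carries the sampling defaults (do_sample=true, temperature 0.6, top_p 0.95) that model.generate() picks up automatically when the checkpoint is loaded. A minimal end-to-end sketch, assuming the LFS shards in the hunks below have been fetched locally; the prompt is illustrative:

from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_2400"  # assumed local path
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path)  # Qwen2ForCausalLM per config.json

# Sampling settings come from generation_config.json unless overridden here.
input_ids = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 12 + 30."}],
    add_generation_prompt=True,
    return_tensors="pt",
)
out = model.generate(input_ids, max_new_tokens=256)
print(tok.decode(out[0][input_ids.shape[-1]:], skip_special_tokens=True))
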
diff --git a/deepscaler-1.5b-8k/actor/global_step_2400/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2400/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b18de37ec0b41be51c2a356f56c7cfd8fc3b6be9
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2400/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae74e7cec96ffe9d6510d6204b9513187cd018bf7db9eb0761e569c074895f8e
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2400/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2400/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8cb5eddcbf7f2b08d63ee9d76ad45f3de06862bf
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2400/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ccc9044e01e4d3e74a2451a5e13a617e1ae538187fe4a2a4b7c40397d49a0804
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2400/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2400/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2400/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2400/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2400/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2400/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2400/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2400/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2400/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2400/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2400/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2400/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2420/config.json b/deepscaler-1.5b-8k/actor/global_step_2420/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2420/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2420/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2420/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2420/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2420/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2420/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..71b5ed778d7f27af4393de890e2f5844b4945612
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2420/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:221bb0650560e9e65940ab182fe6f1fde93cefad658d428e62f6cbb931d78c08
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2420/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2420/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d46e43bd8bebee42bbe6fe3a7d1e9dbb2232409b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2420/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb5b1bd43916c68d46442af94e3554b90292926cbb34bd24d57a99cdf2c7e772
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2420/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2420/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2420/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2420/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2420/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2420/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2420/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2420/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2420/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2420/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2420/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2420/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2440/config.json b/deepscaler-1.5b-8k/actor/global_step_2440/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2440/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2440/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2440/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2440/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2440/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2440/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..24d635885753294f66dd443bc338aaabc74a45c1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2440/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0921bad88350c9c56e86d81401dad176d0ad1effb78f309dcac923da28f80907
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2440/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2440/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bef214a10de2ca211d4b2187a58bb37ac4484d3a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2440/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cef9a7c1e35322ae22ead82ac6692e89b9979f933760368d57af51e21f82abad
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2440/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2440/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2440/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
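
Note how layer 21 straddles the shard boundary in the index above: its attention projections and gate/up MLP weights map to shard 1 while its layernorms and down_proj map to shard 2, which is why loaders must resolve each tensor through this index rather than assuming whole layers live in one file. A minimal sketch of that lookup with the `safetensors` library (the directory path and tensor name are illustrative):

```python
import json
from pathlib import Path

from safetensors import safe_open  # pip install safetensors

# Illustrative path; any global_step_* directory laid out as above works.
ckpt = Path("deepscaler-1.5b-8k/actor/global_step_2440")

index = json.loads((ckpt / "model.safetensors.index.json").read_text())
weight_map = index["weight_map"]  # parameter name -> shard filename

# Layer 21 is the one split across shards, so it makes a good smoke test.
name = "model.layers.21.mlp.down_proj.weight"  # mapped to shard 2 above
with safe_open(ckpt / weight_map[name], framework="pt", device="cpu") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape), tensor.dtype)
```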
diff --git a/deepscaler-1.5b-8k/actor/global_step_2440/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2440/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2440/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2440/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2440/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2440/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
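
The three lines above are a standard Git LFS pointer: the real blob lives in LFS storage and the pointer records only its SHA-256 and byte size. A small sketch for checking a fetched blob against such a pointer, in plain Python with no git-lfs dependency; `parse_lfs_pointer` and `blob_matches_pointer` are hypothetical helper names:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> tuple[str, int]:
    """Read a 3-line LFS pointer: version / 'oid sha256:<hex>' / 'size <bytes>'."""
    fields = dict(
        line.split(" ", 1) for line in Path(pointer_path).read_text().splitlines()
    )
    return fields["oid"].removeprefix("sha256:"), int(fields["size"])

def blob_matches_pointer(blob_path: str, pointer_path: str) -> bool:
    """True if the downloaded blob has the hash and size the pointer records."""
    oid, size = parse_lfs_pointer(pointer_path)
    digest = hashlib.sha256(Path(blob_path).read_bytes()).hexdigest()
    return Path(blob_path).stat().st_size == size and digest == oid
```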
diff --git a/deepscaler-1.5b-8k/actor/global_step_2440/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2440/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2440/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
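
The `chat_template` above is a Jinja template that `transformers` renders via `apply_chat_template`; a minimal sketch, assuming the checkpoint directory is available locally and using an illustrative message:

```python
from transformers import AutoTokenizer

# Illustrative local path; any global_step_* directory with these files works.
tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2440")

messages = [{"role": "user", "content": "What is 7 * 8?"}]

# tokenize=False returns the rendered string so the template is easy to inspect;
# add_generation_prompt=True appends the assistant prefix so the model speaks next.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```

With `add_generation_prompt=True` the rendered prompt should end with the `<|Assistant|><think>\n` prefix from the template, so generation starts inside the reasoning block.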
diff --git a/deepscaler-1.5b-8k/actor/global_step_2460/config.json b/deepscaler-1.5b-8k/actor/global_step_2460/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2460/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
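
This config describes the DeepSeek-R1-Distill-Qwen-1.5B architecture: 28 layers, hidden size 1536, and grouped-query attention with 12 query heads over 2 KV heads. Stored as float32, that matches the 7,108,352,000-byte total_size in the index (roughly 1.78B parameters at 4 bytes each). A short sketch reading the derived quantities back with `transformers` (the path is illustrative):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2460")

head_dim = cfg.hidden_size // cfg.num_attention_heads         # 1536 // 12 = 128
groups = cfg.num_attention_heads // cfg.num_key_value_heads   # 6 query heads per KV head
print(cfg.model_type, cfg.num_hidden_layers, head_dim, groups)
```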
diff --git a/deepscaler-1.5b-8k/actor/global_step_2460/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2460/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2460/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
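
The generation config pins the sampling defaults recommended for the R1 distills (sampling on, temperature 0.6, top_p 0.95), and `transformers` applies it automatically when generating from this directory. A minimal end-to-end sketch, assuming the checkpoint is local; the bfloat16 cast and the message are illustrative choices:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_2460"  # illustrative step
tok = AutoTokenizer.from_pretrained(ckpt)
# bfloat16 is an illustrative memory-saving choice; the shards store float32.
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)

messages = [{"role": "user", "content": "Compute 12 + 30."}]
inputs = tok.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True)

# generate() reads generation_config.json from the checkpoint, so do_sample,
# temperature=0.6 and top_p=0.95 apply without being passed explicitly.
out = model.generate(inputs, max_new_tokens=128)
print(tok.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```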
diff --git a/deepscaler-1.5b-8k/actor/global_step_2460/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2460/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8845713c0dbed7764f1638a9e1098ea549e08253
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2460/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d2b01c5fd051d19e22ded4af665ebd6dfdd56096f24fca3ac69f0d0624fc8f94
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2460/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2460/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..196ea69da4d36f919f4cabfbfe9921bd9654a899
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2460/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c010e9773d00dcac1d48d9b1b7c229e2d4acae4329992d14a30edcb8deab9b0
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2460/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2460/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2460/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2460/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2460/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2460/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
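
The special-token map above points pad_token at the same literal as eos_token, the usual convention for decoder-only checkpoints that ship without a dedicated padding token. A minimal sanity check, assuming the checkpoint directory from this diff has been materialized locally:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2460")
# pad and eos resolve to the same string and the same id (151643, per the
# added_tokens_decoder in the matching tokenizer_config.json below)
assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
assert tok.pad_token_id == tok.eos_token_id == 151643
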
diff --git a/deepscaler-1.5b-8k/actor/global_step_2460/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2460/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2460/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
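
As throughout this diff, the large tokenizer.json blobs are committed as Git LFS pointers: a spec version line, a sha256 oid, and a byte size stand in for the real file until `git lfs pull` fetches it. A small sketch (hypothetical helper names) for checking a fetched blob against its pointer:

import hashlib
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    # Each pointer line is "key value", e.g. "size 11422778".
    return dict(line.split(" ", 1) for line in Path(path).read_text().splitlines())

def verify_lfs_object(pointer_path: str, blob_path: str) -> bool:
    fields = parse_lfs_pointer(pointer_path)
    data = Path(blob_path).read_bytes()
    return (len(data) == int(fields["size"])
            and "sha256:" + hashlib.sha256(data).hexdigest() == fields["oid"])
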
diff --git a/deepscaler-1.5b-8k/actor/global_step_2460/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2460/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2460/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
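
The chat_template above renders user turns behind <|User|> and assistant turns behind <|Assistant|>, drops any reasoning before the final </think> from earlier assistant messages, and, when add_generation_prompt is set, ends the prompt with <|Assistant|><think> plus a newline so the model resumes inside a fresh reasoning block. A short usage sketch, assuming the LFS files are present:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2460")
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "What is 7 * 8?"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>  (plus a trailing newline)
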
diff --git a/deepscaler-1.5b-8k/actor/global_step_2480/config.json b/deepscaler-1.5b-8k/actor/global_step_2480/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2480/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
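
The config describes the standard DeepSeek-R1-Distill-Qwen-1.5B geometry: 28 Qwen2 layers, hidden size 1536, SwiGLU MLPs of width 8960, and grouped-query attention with 12 query heads over 2 key/value heads. Saved in float32 with untied embeddings, that works out to exactly the 7,108,352,000-byte total_size recorded in the safetensors index below; a back-of-the-envelope check:

hidden, inter, vocab, layers = 1536, 8960, 151936, 28
heads, kv_heads = 12, 2
head_dim = hidden // heads   # 128
kv_dim = kv_heads * head_dim  # 256

per_layer = (
    hidden * hidden + hidden          # q_proj weight + bias
    + 2 * (kv_dim * hidden + kv_dim)  # k_proj and v_proj weights + biases
    + hidden * hidden                 # o_proj (no bias)
    + 3 * hidden * inter              # gate_proj, up_proj, down_proj
    + 2 * hidden                      # the two RMSNorm weights
)
total = 2 * vocab * hidden + layers * per_layer + hidden  # embed + lm_head + final norm
print(total, 4 * total)  # 1777088000 params, 7108352000 bytes in float32
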
diff --git a/deepscaler-1.5b-8k/actor/global_step_2480/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2480/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2480/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
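
The generation config bakes in the sampling setup DeepSeek recommends for the R1 distills (do_sample with temperature 0.6 and top_p 0.95), so a plain generate() call picks these values up without extra arguments. A minimal end-to-end sketch, assuming the shards have been pulled from LFS:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_2480"
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(
    ckpt,
    torch_dtype=torch.bfloat16,  # shards are stored in float32; downcast to fit smaller GPUs
    device_map="auto",
)
input_ids = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 12 + 35."}],
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)
# temperature/top_p/do_sample are read from generation_config.json
out = model.generate(input_ids, max_new_tokens=256)
print(tok.decode(out[0], skip_special_tokens=True))
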
diff --git a/deepscaler-1.5b-8k/actor/global_step_2480/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2480/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1065e3590d464a254f8ac2eaef67728b23d4cee0
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2480/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:288eca483d74f4e23b63892ced2bc1ba23078d7c253cb5a0afce795617ffae72
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2480/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2480/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d9a1c9a87988c361886a9bfba1e3f30be3b5b28d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2480/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9786a733b4233fe3f7e2711363272bc3dc2680911e239bcc410d29591607d841
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2480/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2480/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2480/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
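
The weight_map splits the checkpoint across two shards, with the boundary falling inside layer 21: its attention and gate/up projections stay in shard 1, while its down_proj and layer norms begin shard 2 alongside layers 22-27, lm_head, and the final norm. A quick consistency check that every indexed tensor actually resolves, assuming the shards are present locally:

import json
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_2480"
with open(f"{ckpt}/model.safetensors.index.json") as fh:
    index = json.load(fh)

# Group tensor names by the shard the index assigns them to.
by_shard: dict[str, set[str]] = {}
for name, shard in index["weight_map"].items():
    by_shard.setdefault(shard, set()).add(name)

for shard, names in by_shard.items():
    with safe_open(f"{ckpt}/{shard}", framework="pt") as f:
        missing = names - set(f.keys())
        assert not missing, f"{shard} lacks {sorted(missing)[:3]}"
print(f"all {len(index['weight_map'])} tensors resolve across {len(by_shard)} shards")
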
diff --git a/deepscaler-1.5b-8k/actor/global_step_2480/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2480/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2480/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2480/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2480/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2480/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2480/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2480/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2480/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2500/config.json b/deepscaler-1.5b-8k/actor/global_step_2500/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2500/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2500/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2500/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2500/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2500/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2500/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9cc94ff1bc6b99e7514b7c5067972e32dc9340a9
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2500/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3845dd8073705c7c163c05cd9b719cb926eaadc02696c91205b3c5ae8a2912cc
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2500/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2500/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..66a2d0982d1a77e647342de8ea21f16f9eb04f6c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2500/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75a9d2af2c40f8ea1b05a0c23ddb000d07713291f0a9d33d1548ee3b7ac3ee01
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2500/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2500/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2500/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
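
For reference, the `model.safetensors.index.json` added above is what sharded-checkpoint loaders consume: `weight_map` maps each tensor name to the shard file that holds it. A minimal sketch of reading it directly, assuming a local checkout with the LFS objects pulled (`transformers`' `from_pretrained` performs this resolution automatically):

```python
import json
from pathlib import Path

from safetensors.torch import load_file  # pip install safetensors

ckpt = Path("deepscaler-1.5b-8k/actor/global_step_2500")  # local checkout, LFS objects pulled
index = json.loads((ckpt / "model.safetensors.index.json").read_text())

# Load each shard once; the union of the shards is the full state dict.
state_dict = {}
for shard in sorted(set(index["weight_map"].values())):
    state_dict.update(load_file(str(ckpt / shard)))

# Every tensor the index names must have come from some shard.
assert set(index["weight_map"]) == set(state_dict)
```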
diff --git a/deepscaler-1.5b-8k/actor/global_step_2500/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2500/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2500/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2500/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2500/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2500/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
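
Each `tokenizer.json` (and each `model-*.safetensors`) is committed as a Git LFS pointer like the three lines above: a spec-version line, the SHA-256 of the real object, and its byte size. A small sketch, assuming the object has already been fetched, of checking a file against its pointer text:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_text: str, object_path: str) -> bool:
    """Compare a fetched object against the oid/size fields of its LFS pointer."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    data = Path(object_path).read_bytes()
    return (
        len(data) == int(fields["size"])
        and hashlib.sha256(data).hexdigest() == fields["oid"].removeprefix("sha256:")
    )

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
size 11422778"""
# True if the checked-out tokenizer.json was fetched intact.
print(verify_lfs_pointer(pointer, "deepscaler-1.5b-8k/actor/global_step_2500/tokenizer.json"))
```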
diff --git a/deepscaler-1.5b-8k/actor/global_step_2500/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2500/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2500/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+      "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+      "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+      "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+      "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
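
The `chat_template` above is the DeepSeek-R1 conversational template. A minimal sketch of rendering it through `transformers`, assuming a local path to this checkpoint and using a hypothetical prompt:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2500")

messages = [{"role": "user", "content": "What is 7 * 8?"}]
# add_generation_prompt=True appends "<|Assistant|><think>\n" so the model opens its reasoning.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>
```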
diff --git a/deepscaler-1.5b-8k/actor/global_step_2520/config.json b/deepscaler-1.5b-8k/actor/global_step_2520/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2520/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
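
As a quick consistency check, the shapes implied by this config reproduce the `total_size` recorded in this checkpoint's safetensors index below (7,108,352,000 bytes at the declared `torch_dtype` of float32). A back-of-envelope sketch:

```python
# Parameter count from the config fields above (Qwen2 with GQA, untied embeddings).
hidden, inter, layers, vocab = 1536, 8960, 28, 151936
heads, kv_heads = 12, 2
head_dim = hidden // heads                # 128
kv_dim = kv_heads * head_dim              # 256

attn = (
    hidden * hidden + hidden              # q_proj weight + bias
    + 2 * (kv_dim * hidden + kv_dim)      # k_proj and v_proj weights + biases
    + hidden * hidden                     # o_proj (no bias)
)
mlp = 3 * hidden * inter                  # gate_proj, up_proj, down_proj
norms = 2 * hidden                        # input + post-attention RMSNorms
per_layer = attn + mlp + norms

total = layers * per_layer + 2 * vocab * hidden + hidden  # + embed_tokens, lm_head, final norm
print(total, total * 4)                   # 1777088000 7108352000 (float32 bytes)
```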
diff --git a/deepscaler-1.5b-8k/actor/global_step_2520/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2520/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2520/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
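
These are the sampling defaults the checkpoint ships with (sampling enabled, temperature 0.6, top-p 0.95). A minimal sketch of generating with them via `transformers`; the local path and prompt are hypothetical, and `generate` would pick these values up from `generation_config.json` even if they were not passed explicitly:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_2520"  # hypothetical local path
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)

input_ids = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 12 * 34."}],
    add_generation_prompt=True,
    return_tensors="pt",
)
# Flags mirror generation_config.json; shown explicitly for clarity.
out = model.generate(input_ids, do_sample=True, temperature=0.6, top_p=0.95, max_new_tokens=512)
print(tok.decode(out[0][input_ids.shape[-1]:], skip_special_tokens=True))
```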
diff --git a/deepscaler-1.5b-8k/actor/global_step_2520/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2520/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d906b67feffc850172a78605ba732d961a036a93
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2520/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7eb9548d0aede94af03a6e14b3b4595dde8c11c90c4fab1e7dfdbcb479a7b536
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2520/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2520/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..fbdad5d1295a008b6239ad832bdd86586c07d83d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2520/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d31999f7ddc5f0a2b9c998a2d923168505b681df7199257f6dbd3864dc695a7a
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2520/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2520/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2520/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
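
The same 346-line index recurs for every checkpoint in this PR. A sketch, assuming the shards have been pulled from LFS, of validating that every tensor actually lives in the shard the `weight_map` names (note this checkpoint splits layer 21 across the two shards, which is normal for size-based sharding):

```python
import json
from pathlib import Path

from safetensors import safe_open

ckpt = Path("deepscaler-1.5b-8k/actor/global_step_2520")  # hypothetical local path
index = json.loads((ckpt / "model.safetensors.index.json").read_text())

for shard in sorted(set(index["weight_map"].values())):
    # safe_open reads only the shard header, not the tensor data.
    with safe_open(str(ckpt / shard), framework="pt") as f:
        in_shard = set(f.keys())
    claimed = {name for name, s in index["weight_map"].items() if s == shard}
    assert claimed == in_shard, f"index disagrees with {shard}"
print("index and shards agree")
```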
diff --git a/deepscaler-1.5b-8k/actor/global_step_2520/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2520/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2520/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2520/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2520/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2520/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2520/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2520/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2520/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2540/config.json b/deepscaler-1.5b-8k/actor/global_step_2540/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2540/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2540/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2540/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2540/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2540/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2540/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5dd60f7451f665984b49ce59f11f9277b984cae5
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2540/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:376e1b0864069b02e1f5df9119c1d98886bf1af881868b637780d3060d0ac868
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2540/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2540/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..18b7b93a2dde29bc55faf3260198ed6afb241ecf
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2540/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d754ae891f14f70f777d54102026e408bcda857ffa5619690911d07a3d5bec8f
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2540/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2540/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2540/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2540/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2540/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2540/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2540/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2540/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2540/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2540/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2540/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2540/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2560/config.json b/deepscaler-1.5b-8k/actor/global_step_2560/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2560/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2560/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2560/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2560/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2560/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2560/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8440e702a952ebfd2167573acdecebdfa1428217
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2560/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b63fd8146f41b5565f7aad9bcf59bf7238934115df77bfcab0ba3e90fcf2176
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2560/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2560/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..927ce3a99bad39cba14e1ccb73b9c6c335a3f4c6
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2560/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f972268148aa3f7336b1ae31ce573bf781ab287ed0635ab282d654f6950eab4
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2560/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2560/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2560/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
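
The model.safetensors.index.json above maps every tensor name to one of the two shards. As a quick offline sanity check (a minimal sketch assuming the checkpoint directory has been pulled locally; the path below is illustrative), one can confirm that each mapped shard actually exists next to the index and count the tensors per shard:

```python
import json
from collections import Counter
from pathlib import Path

# Illustrative local path; any global_step_* directory with an index works.
ckpt = Path("deepscaler-1.5b-8k/actor/global_step_2560")

index = json.loads((ckpt / "model.safetensors.index.json").read_text())
weight_map = index["weight_map"]

# Every referenced shard must exist on disk next to the index.
per_shard = Counter(weight_map.values())
missing = [s for s in per_shard if not (ckpt / s).exists()]
assert not missing, f"referenced shards not found: {missing}"

print(f"{len(weight_map)} tensors across {len(per_shard)} shard(s)")
print("declared tensor bytes:", index["metadata"]["total_size"])
```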
diff --git a/deepscaler-1.5b-8k/actor/global_step_2560/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2560/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2560/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
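
In special_tokens_map.json the pad token is the same <|end▁of▁sentence|> token as EOS, which is typical for decoder-only checkpoints trained without a dedicated pad token; padding must therefore be distinguished via attention masks rather than by token id. A minimal sketch (illustrative path) of how this surfaces through the transformers API:

```python
from transformers import AutoTokenizer

# Illustrative path; every global_step_* directory ships the same token map.
tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2560")

print(tok.bos_token)                         # <|begin▁of▁sentence|>
print(tok.pad_token == tok.eos_token)        # True: pad aliases EOS
print(tok.pad_token_id == tok.eos_token_id)  # True: masks, not ids, mark padding
```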
diff --git a/deepscaler-1.5b-8k/actor/global_step_2560/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2560/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2560/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
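
tokenizer.json is committed as a Git LFS pointer: three lines recording the spec version, the sha256 of the real object, and its size in bytes. After `git lfs pull` materializes the file, both fields can be re-derived locally (a sketch under the assumption that the file has been downloaded to the path shown):

```python
import hashlib
from pathlib import Path

path = Path("deepscaler-1.5b-8k/actor/global_step_2560/tokenizer.json")
data = path.read_bytes()

# These should reproduce the oid and size lines of the pointer above.
print("oid sha256:", hashlib.sha256(data).hexdigest())
print("size:", len(data))  # pointer records 11422778
```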
diff --git a/deepscaler-1.5b-8k/actor/global_step_2560/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2560/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2560/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
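
The chat_template above is a Jinja2 program: it emits the BOS token plus any system prompt, wraps user turns with <|User|> and assistant turns with <|Assistant|> ... <|end▁of▁sentence|>, drops everything up to a closing </think> tag from stored assistant turns, and, when add_generation_prompt is set, ends the prompt with <|Assistant|><think>\n so the model opens with its reasoning trace. A minimal rendering sketch (illustrative path):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2560")

messages = [
    {"role": "system", "content": "You are a careful math assistant."},
    {"role": "user", "content": "What is 12 * 13?"},
]

# Render the prompt string exactly as the chat_template dictates.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```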
diff --git a/deepscaler-1.5b-8k/actor/global_step_2580/config.json b/deepscaler-1.5b-8k/actor/global_step_2580/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2580/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
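
config.json describes a 28-layer Qwen2 model with grouped-query attention: 12 query heads against 2 key/value heads, so with a 128-dimensional head (1536 / 12) each K/V projection is only 256 wide. These fields fully determine the parameter count, which can be cross-checked against the index metadata (total_size 7,108,352,000 bytes at float32, i.e. 4 bytes per parameter). The arithmetic, using only values from the config above:

```python
hidden, inter, layers, vocab = 1536, 8960, 28, 151936
heads, kv_heads = 12, 2
head_dim = hidden // heads        # 128
kv_dim = kv_heads * head_dim      # 256

attn = hidden * hidden + hidden          # q_proj weight + bias
attn += 2 * (kv_dim * hidden + kv_dim)   # k_proj and v_proj, weight + bias each
attn += hidden * hidden                  # o_proj (no bias)
mlp = 3 * hidden * inter                 # gate_proj, up_proj, down_proj
norms = 2 * hidden                       # input + post-attention RMSNorm

total = layers * (attn + mlp + norms)    # transformer stack
total += 2 * vocab * hidden              # embed_tokens + untied lm_head
total += hidden                          # final model.norm

print(total)      # 1777088000 parameters
print(total * 4)  # 7108352000 bytes -- matches metadata.total_size exactly
```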
diff --git a/deepscaler-1.5b-8k/actor/global_step_2580/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2580/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2580/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
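
generation_config.json pins the sampling defaults recommended for R1-style models (do_sample with temperature 0.6 and top_p 0.95); transformers picks these up automatically when generating from the checkpoint directory. A hedged end-to-end sketch (illustrative path, toy prompt):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_2580"  # illustrative local path
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16)

prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 7 + 8."}],
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tok(prompt, return_tensors="pt")

# temperature=0.6, top_p=0.95, do_sample=True all come from generation_config.json.
out = model.generate(**inputs, max_new_tokens=256)
print(tok.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```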
diff --git a/deepscaler-1.5b-8k/actor/global_step_2580/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2580/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..825b9236af413dca13a511db85262e6d5a8c7402
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2580/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:120a22c2f3e8d1b01b04ad8190870468fd2c7d72a30afd2cd729b06ed907d579
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2580/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2580/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dcdcebc8a16dcfec35620c603fb6d7beb8f56725
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2580/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7c12342b0f4d2d0c1163fd92038bf09b52ef2c0ea289d33b24ba06cfa2ba2a7
+size 2111719976
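
The two shard pointers record 4,996,670,464 and 2,111,719,976 bytes, which together exceed the index's total_size of 7,108,352,000 bytes by 38,440 bytes: the slack is the per-file JSON header that every .safetensors shard carries in addition to its raw tensor data. The arithmetic:

```python
shards = [4_996_670_464, 2_111_719_976]  # sizes from the LFS pointers above
tensor_bytes = 7_108_352_000             # metadata.total_size in the index
print(sum(shards))                 # 7108390440
print(sum(shards) - tensor_bytes)  # 38440 bytes of safetensors headers
```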
diff --git a/deepscaler-1.5b-8k/actor/global_step_2580/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2580/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2580/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2580/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2580/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2580/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2580/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2580/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2580/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2580/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2580/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2580/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_260/config.json b/deepscaler-1.5b-8k/actor/global_step_260/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_260/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_260/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_260/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_260/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_260/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_260/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c819803dd5576ee6cc93697d1bbbfbc081774ac9
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_260/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fe619c05a430a69bb38c618828f8313585d098b1583c6bb4dab6611a260162c
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_260/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_260/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..308df0140010d1d0c47d1ce62fbe6505e458887e
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_260/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66d38b73dc0124975a90506257a9e27c81b68f9b0a859673b448557ef2513b96
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_260/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_260/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_260/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_260/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_260/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_260/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_260/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_260/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_260/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_260/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_260/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_260/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2600/config.json b/deepscaler-1.5b-8k/actor/global_step_2600/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2600/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2600/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2600/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2600/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2600/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2600/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c02bb805c972144e3a25569b20a07cde6d617508
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2600/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:507c8a54efc11f2ee30603009805abc12eae986763308791aa2e11fcbf00701e
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2600/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2600/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6b1ba2d98ee475fb82d51c4765e241304c114514
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2600/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bd4bd43a81c269cf190ea12685f0433cf1c998057cfa9745b12daa7abde9704
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2600/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2600/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2600/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
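
The index above maps every tensor name to the shard that stores it; a loader opens each referenced shard once and pulls tensors by name. Below is a minimal sketch of that resolution step using the `safetensors` package, assuming a local copy of the checkpoint directory (the path is illustrative; in practice `transformers.from_pretrained` performs this resolution automatically).

```python
# Minimal sketch: resolve sharded weights through model.safetensors.index.json.
# Assumes the `safetensors` package and a local checkpoint path (illustrative).
import json
from safetensors import safe_open

ckpt_dir = "deepscaler-1.5b-8k/actor/global_step_2600"

with open(f"{ckpt_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

# weight_map: tensor name -> shard file, e.g.
# "model.layers.20.mlp.up_proj.weight" -> "model-00001-of-00002.safetensors"
handles = {}
state_dict = {}
for name, shard in index["weight_map"].items():
    if shard not in handles:
        handles[shard] = safe_open(f"{ckpt_dir}/{shard}", framework="pt")
    state_dict[name] = handles[shard].get_tensor(name)

print(len(state_dict), "tensors,", index["metadata"]["total_size"], "bytes")
```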
diff --git a/deepscaler-1.5b-8k/actor/global_step_2600/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2600/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2600/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
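
As in the other checkpoints, `special_tokens_map.json` reuses `<|end▁of▁sentence|>` as both EOS and PAD, the usual arrangement for decoder-only models trained without a dedicated padding token. A minimal sketch of what that implies for batched tokenization, assuming a local checkpoint path (illustrative):

```python
# Minimal sketch: with pad_token == eos_token, batched encoding pads with the
# EOS id and relies on the attention mask to ignore padding positions.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2600")
assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"

batch = tok(["1 + 1 = ?", "What is the capital of France?"],
            padding=True, return_tensors="pt")
print(batch["input_ids"].shape, batch["attention_mask"][0])
```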
diff --git a/deepscaler-1.5b-8k/actor/global_step_2600/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2600/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2600/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
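
The three lines above are a Git LFS pointer, not the tokenizer itself: `oid` records the SHA-256 of the real ~11.4 MB blob and `size` its byte length, so a clone without LFS smudging sees only this stub. A minimal sketch of parsing and verifying such a pointer (the helper names are hypothetical, not an LFS API):

```python
# Minimal sketch: parse a Git LFS pointer and verify a fetched blob against it.
import hashlib

def parse_lfs_pointer(text: str) -> dict:
    # Pointer files are "key value" lines: version, oid, size.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"algo": algo, "digest": digest, "size": int(fields["size"])}

def verify_blob(blob: bytes, ptr: dict) -> bool:
    # True iff the blob matches the recorded size and sha256 digest.
    return (len(blob) == ptr["size"]
            and hashlib.sha256(blob).hexdigest() == ptr["digest"])

ptr = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893\n"
    "size 11422778\n")
print(ptr["size"])  # 11422778
```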
diff --git a/deepscaler-1.5b-8k/actor/global_step_2600/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2600/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2600/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
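
The `chat_template` above does three things worth noting: it wraps turns in `<|User|>`/`<|Assistant|>` markers, strips everything up to `</think>` from earlier assistant turns before re-serializing them, and appends `<|Assistant|><think>\n` when `add_generation_prompt` is set so the model opens a fresh reasoning block. A minimal sketch of rendering it through `transformers`, assuming a local checkpoint path (illustrative):

```python
# Minimal sketch: render the DeepSeek-style chat template shipped above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2600")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]
# add_generation_prompt=True appends the assistant marker and opens <think>.
prompt = tok.apply_chat_template(messages, tokenize=False,
                                 add_generation_prompt=True)
print(prompt)
```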
diff --git a/deepscaler-1.5b-8k/actor/global_step_2620/config.json b/deepscaler-1.5b-8k/actor/global_step_2620/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2620/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
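
These hyperparameters pin down the checkpoint size exactly: 12 query heads over `hidden_size` 1536 give a head dim of 128, the 2 `num_key_value_heads` make attention grouped-query, and with untied embeddings the parameter count times 4 bytes (`torch_dtype` float32) reproduces the `total_size` recorded in the safetensors index. A quick arithmetic check:

```python
# Quick check: parameter count implied by config.json, times 4 bytes (fp32),
# should equal total_size = 7108352000 in model.safetensors.index.json.
h, inter, layers, vocab = 1536, 8960, 28, 151936
n_heads, n_kv = 12, 2
head_dim = h // n_heads        # 128
kv_dim = n_kv * head_dim       # 256

attn = (h * h + h) + 2 * (h * kv_dim + kv_dim) + h * h  # q(w+b), k/v(w+b), o(w)
mlp = 3 * h * inter                                     # gate, up, down
norms = 2 * h                                           # two RMSNorms per layer
per_layer = attn + mlp + norms

total = layers * per_layer + 2 * vocab * h + h  # + embed, untied lm_head, final norm
assert total * 4 == 7_108_352_000
print(total)  # 1,777,088,000 parameters
```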
diff --git a/deepscaler-1.5b-8k/actor/global_step_2620/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2620/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2620/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
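
Decoding defaults are sampled (`do_sample`) at temperature 0.6 with nucleus cutoff 0.95. The sketch below re-implements that pipeline for a single step to make the two knobs concrete; it is illustrative, not the `transformers` implementation:

```python
# Minimal sketch: temperature scaling then top-p (nucleus) filtering, matching
# the temperature=0.6 / top_p=0.95 defaults in generation_config.json.
import torch

def sample_next_token(logits, temperature=0.6, top_p=0.95):
    probs = torch.softmax(logits / temperature, dim=-1)
    sorted_probs, sorted_idx = torch.sort(probs, descending=True)
    cumulative = torch.cumsum(sorted_probs, dim=-1)
    # Keep the smallest prefix whose cumulative probability reaches top_p.
    cutoff = int((cumulative < top_p).sum().item()) + 1
    kept = sorted_probs[:cutoff] / sorted_probs[:cutoff].sum()
    choice = torch.multinomial(kept, num_samples=1).item()
    return int(sorted_idx[choice])

logits = torch.randn(151936)  # vocab-sized logits from one decoding step
print(sample_next_token(logits))
```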
diff --git a/deepscaler-1.5b-8k/actor/global_step_2620/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2620/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..418285b92cec52726da56fea34742464ec1383b5
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2620/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:798d4f90ad1df4c8055729527a7d1103614348d93f002b42492723f6dee7c91a
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2620/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2620/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..35a3fb4aa03356d82edc992ebb1b1676faef9dd0
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2620/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50dd7c286b82125bb4cc4f9c66261e1631833d274575091d4958fd6ef5ba145d
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2620/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2620/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2620/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
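
With the config, both shards, the index above, and the tokenizer files that follow, the `global_step_2620` directory is a complete Hugging Face checkpoint. A minimal end-to-end sketch, assuming a local copy (path illustrative):

```python
# Minimal sketch: load the full checkpoint and generate one reply.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_2620"
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)

prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 17 * 24."}],
    tokenize=False, add_generation_prompt=True)
inputs = tok(prompt, return_tensors="pt")
# generation_config.json supplies do_sample=True, temperature=0.6, top_p=0.95.
out = model.generate(**inputs, max_new_tokens=256)
print(tok.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```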
diff --git a/deepscaler-1.5b-8k/actor/global_step_2620/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2620/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2620/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2620/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2620/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2620/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2620/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2620/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2620/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+      "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+      "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
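
The `chat_template` above renders conversations into the `<|User|>`/`<|Assistant|>` format, stripping `<think>` reasoning blocks from prior assistant turns and, when a generation prompt is requested, ending with `<|Assistant|><think>\n` so the model opens a fresh reasoning block. A hedged usage sketch, assuming a local clone of this repo so the checkpoint subfolder can be loaded directly:

```python
from transformers import AutoTokenizer

# Load the tokenizer shipped with this checkpoint (local path assumed).
tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2620")

messages = [{"role": "user", "content": "What is 2 + 2?"}]

# add_generation_prompt=True appends the trailing '<|Assistant|><think>\n'
# defined by the chat_template above.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```
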
diff --git a/deepscaler-1.5b-8k/actor/global_step_2640/config.json b/deepscaler-1.5b-8k/actor/global_step_2640/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2640/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
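
This config pins down the model size exactly. Counting parameters from its fields (untied embeddings, grouped-query attention with 2 KV heads, head_dim = 1536/12 = 128, Qwen2-style QKV biases) reproduces the `total_size` recorded in the shard index below, since the checkpoints are saved in float32 at 4 bytes per parameter. A worked check:

```python
# Parameter count implied by config.json (values copied from this diff).
h, inter, layers, vocab = 1536, 8960, 28, 151936
heads, kv_heads = 12, 2
head_dim = h // heads          # 128
kv_dim = kv_heads * head_dim   # 256

attn = h * h + h                    # q_proj weight + bias
attn += 2 * (h * kv_dim + kv_dim)   # k_proj, v_proj (GQA) + biases
attn += h * h                       # o_proj (no bias)
mlp = 3 * h * inter                 # gate_proj, up_proj, down_proj
norms = 2 * h                       # input + post-attention RMSNorm

total = layers * (attn + mlp + norms) + h   # + final model.norm
total += 2 * vocab * h                      # embed_tokens + untied lm_head

print(total)       # 1_777_088_000 parameters (~1.78B incl. embeddings)
print(total * 4)   # 7_108_352_000 bytes = the index's "total_size"
```
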
diff --git a/deepscaler-1.5b-8k/actor/global_step_2640/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2640/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2640/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
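
`generation_config.json` fixes the sampling defaults (sampling on, temperature 0.6, top-p 0.95), which `transformers` picks up automatically at load time. A minimal generation sketch under the assumption of a local checkpoint path; casting to bfloat16 is an optional deviation from the stored float32 weights:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_2640"  # assumed local path
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)

prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 17 * 24."}],
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tok(prompt, return_tensors="pt")

# do_sample / temperature / top_p are read from generation_config.json.
out = model.generate(**inputs, max_new_tokens=256)
print(tok.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```
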
diff --git a/deepscaler-1.5b-8k/actor/global_step_2640/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2640/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2ad16bc63796b8f73f72c0fce894d0775189512a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2640/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65b905e0145f9e5d343dd32f1ebbf7a0f65a8be5ef012bdd6001a68812c34a67
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2640/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2640/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..afca046c0447c3fd5958a25fd7f3dbc98f231fac
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2640/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c08e117209201b2735b5b86ceb1fd8ef7efda3d2a449a04d5ad5e1c9cd44513
+size 2111719976
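
The two shard pointers above sum to slightly more than the index's `total_size`, because `total_size` counts tensor data only while each safetensors file also carries an 8-byte length prefix plus a JSON header. A quick check of the figures in this hunk (the header attribution is an inference from the safetensors format, not stated in the repo):

```python
shards = 4_996_670_464 + 2_111_719_976  # bytes on disk, from the LFS pointers
tensors = 7_108_352_000                 # "total_size" from the shard index
print(shards - tensors)                 # 38_440 bytes of per-file headers
```
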
diff --git a/deepscaler-1.5b-8k/actor/global_step_2640/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2640/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2640/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2640/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2640/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2640/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2640/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2640/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2640/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2640/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2640/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2640/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+      "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+      "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+      "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+      "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2660/config.json b/deepscaler-1.5b-8k/actor/global_step_2660/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2660/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2660/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2660/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2660/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2660/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2660/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..55be78aa92044771871c08de6abea910cee8c395
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2660/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2f8bb700c522f6a7082ab875caf119f25e1223c973b8488fb5edee570f1e9d0
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2660/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2660/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9601441f583a97b3089dd997e57bd4f9e6bf438a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2660/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56b2993fa47753d34894a28e20a0030f373fcf98f72b5962ad14032317b31980
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2660/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2660/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2660/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
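The `weight_map` above assigns every tensor name to one of the checkpoint's two safetensors shards. A minimal sketch of loading weights through such an index, assuming the `safetensors` package is installed and the shard files sit next to the index (the helper name `load_sharded` is illustrative, not part of this repository):

```python
import json
from safetensors import safe_open

def load_sharded(index_path="model.safetensors.index.json"):
    """Resolve each tensor via the index and read it from its shard."""
    with open(index_path) as f:
        index = json.load(f)
    handles = {}   # open each shard file only once
    tensors = {}
    for name, shard in index["weight_map"].items():
        if shard not in handles:
            handles[shard] = safe_open(shard, framework="pt", device="cpu")
        tensors[name] = handles[shard].get_tensor(name)
    return tensors
```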
diff --git a/deepscaler-1.5b-8k/actor/global_step_2660/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2660/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2660/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
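As the map shows, the checkpoint reuses the eos token as its pad token, a common setup for decoder-only models that have no dedicated padding token. A small sketch of what that implies once the tokenizer is loaded (checkpoint path taken from this diff; requires `transformers`):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2660")

# pad and eos resolve to the same string, so padded positions carry eos ids
assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
```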
diff --git a/deepscaler-1.5b-8k/actor/global_step_2660/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2660/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2660/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
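The tokenizer.json entry above is a Git LFS pointer rather than file content: it records only the object's sha256 and byte size, and `git lfs pull` replaces it with the real 11 MB file. A sketch of checking a fetched file against its pointer (`verify_lfs` is an illustrative helper, not a repository utility):

```python
import hashlib
import os

def verify_lfs(pointer_text: str, path: str) -> bool:
    """Check a working-tree file against the oid/size in an LFS pointer."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].split(":", 1)[1]   # strip the 'sha256:' prefix
    expected_size = int(fields["size"])
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    return digest == expected_oid and os.path.getsize(path) == expected_size
```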
diff --git a/deepscaler-1.5b-8k/actor/global_step_2660/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2660/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2660/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
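The `chat_template` above wraps user turns in `<|User|>` and assistant turns in `<|Assistant|> ... <|end▁of▁sentence|>`, strips any previous `</think>` reasoning from assistant history, and, when a generation prompt is requested, ends the string with the assistant marker. A sketch of rendering it through `transformers` (checkpoint path from this diff):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2660")

prompt = tok.apply_chat_template(
    [{"role": "user", "content": "What is 17 * 24?"}],
    tokenize=False,
    add_generation_prompt=True,  # appends the trailing assistant/think marker
)
print(prompt)
```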
diff --git a/deepscaler-1.5b-8k/actor/global_step_2680/config.json b/deepscaler-1.5b-8k/actor/global_step_2680/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2680/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
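The config describes a 28-layer Qwen2 model with grouped-query attention: 12 query heads share 2 key/value heads. The attention geometry that follows from those numbers, worked out below:

```python
# Attention geometry implied by the config above.
hidden_size = 1536
num_attention_heads = 12
num_key_value_heads = 2

head_dim = hidden_size // num_attention_heads                # 128
q_width = num_attention_heads * head_dim                     # 1536: q_proj output width
kv_width = num_key_value_heads * head_dim                    # 256: k_proj/v_proj output width
queries_per_kv = num_attention_heads // num_key_value_heads  # 6 query heads per KV head

print(head_dim, q_width, kv_width, queries_per_kv)           # 128 1536 256 6
```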
diff --git a/deepscaler-1.5b-8k/actor/global_step_2680/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2680/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2680/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
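These defaults mean `generate()` samples with temperature 0.6 and nucleus top_p 0.95 unless overridden at call time. A minimal usage sketch (checkpoint path from this diff; requires `transformers` and the LFS weights pulled):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_2680"
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path)

inputs = tok("Prove that the sum of two even numbers is even.", return_tensors="pt")
# do_sample/temperature/top_p are picked up from generation_config.json
out = model.generate(**inputs, max_new_tokens=128)
print(tok.decode(out[0], skip_special_tokens=True))
```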
diff --git a/deepscaler-1.5b-8k/actor/global_step_2680/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2680/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a2dd4ace5b9f4037f66c091099eb76c1afdb0be1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2680/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2bd85425fe1b2c9382f744bb2b604d4661afdc14f2293a85ea4f36c7dc13923
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2680/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2680/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..85638bbca8fb2afa2fa44d991d7df1c7f041f7dd
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2680/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f615f8eb30c1bbca7caad0bd5c223b45309cffd60fe2f66d0fd20834414fd5c
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2680/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2680/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2680/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
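A consistency note on the numbers above: `metadata.total_size` (7,108,352,000 bytes, i.e. about 1.78B float32 parameters) counts tensor data only, while the two LFS pointers report whole shard files, which also contain each shard's safetensors JSON header, so the file sizes sum slightly higher:

```python
shard_file_sizes = [4996670464, 2111719976]  # from the two LFS pointers above
tensor_bytes = 7108352000                    # metadata.total_size in the index

header_overhead = sum(shard_file_sizes) - tensor_bytes
print(header_overhead)  # 38440 bytes of safetensors headers across both shards
```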
diff --git a/deepscaler-1.5b-8k/actor/global_step_2680/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2680/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2680/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2680/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2680/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2680/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2680/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2680/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2680/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
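
The chat_template above is a Jinja template that serializes a message list into the <|User|>/<|Assistant|> turn format, drops any reasoning before the closing </think> from prior assistant turns, and (with add_generation_prompt) opens a fresh <think> block for the model to continue. A minimal rendering sketch, assuming the checkpoint directory is available locally and `transformers` is installed:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2680")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]
# add_generation_prompt=True appends the trailing '<|Assistant|><think>\n'
# so the model continues as the assistant, starting inside a think block.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
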
diff --git a/deepscaler-1.5b-8k/actor/global_step_2700/config.json b/deepscaler-1.5b-8k/actor/global_step_2700/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2700/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
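
The config above pins down the architecture exactly: 28 Qwen2 layers, hidden size 1536, grouped-query attention with 12 query heads over 2 key/value heads, and untied embeddings over a 151936-token vocabulary. A back-of-the-envelope check (plain arithmetic from these fields, nothing from the checkpoint itself) reproduces the 7108352000-byte total_size reported in the safetensors index below:

# Estimate the parameter count of this Qwen2 config and compare it with the
# safetensors index's metadata.total_size (float32 = 4 bytes per parameter).
V, H, I, L = 151936, 1536, 8960, 28       # vocab, hidden, intermediate, layers
n_heads, n_kv = 12, 2
head_dim = H // n_heads                   # 128
attn = (H * H + H) + 2 * (H * n_kv * head_dim + n_kv * head_dim) + H * H  # q,k,v (biased), o
mlp = 3 * H * I                           # gate, up, down projections
norms = 2 * H                             # input + post-attention RMSNorm
per_layer = attn + mlp + norms
total = 2 * V * H + L * per_layer + H     # untied embeddings + lm_head, layers, final norm
print(total, total * 4)                   # 1777088000 params, 7108352000 bytes -- matches
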
diff --git a/deepscaler-1.5b-8k/actor/global_step_2700/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2700/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2700/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
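
The generation_config above bakes in the evaluation-time sampling regime: nucleus sampling with temperature 0.6 and top_p 0.95. A minimal usage sketch (the prompt is illustrative); passing the parameters explicitly is redundant, since `transformers` reads them from this file by default:

from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_2700"
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt)
inputs = tok("1 + 1 =", return_tensors="pt")
# do_sample/temperature/top_p default to the values in generation_config.json;
# they are repeated here only to make the decoding settings explicit.
out = model.generate(**inputs, do_sample=True, temperature=0.6, top_p=0.95,
                     max_new_tokens=32)
print(tok.decode(out[0], skip_special_tokens=True))
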
diff --git a/deepscaler-1.5b-8k/actor/global_step_2700/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2700/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f44e9f85e4909cb76132db765f5e92b72098a1c5
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2700/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b0f6a8e481585a995ba88754266b9df929725ce3d42b04a450c9d33f8879314
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2700/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2700/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..df1b3f18ce3b053d5ee00a3ed1f9c0508443b894
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2700/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:164fb202de98b73f78052ef47d68c8eb843ff6f84ae403713dc19f0e82f52c6d
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2700/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2700/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2700/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
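
model.safetensors.index.json is the shard directory for the two weight files above: metadata.total_size is the summed tensor bytes, and weight_map sends each tensor name to the shard that stores it (layers 0-20 plus the embeddings in shard 1; layer 21 straddling the boundary; layers 22-27, the final norm, and lm_head in shard 2). A minimal sketch of resolving one tensor by hand, assuming the `safetensors` package and the local shards:

import json
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_2700"
with open(f"{ckpt}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.norm.weight"                 # stored in shard 2 per the map above
shard = index["weight_map"][name]          # -> "model-00002-of-00002.safetensors"
with safe_open(f"{ckpt}/{shard}", framework="pt") as f:  # framework="pt" needs torch
    tensor = f.get_tensor(name)
print(shard, tuple(tensor.shape))          # expected shape: (1536,)
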
diff --git a/deepscaler-1.5b-8k/actor/global_step_2700/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2700/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2700/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2700/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2700/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2700/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2700/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2700/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2700/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2720/config.json b/deepscaler-1.5b-8k/actor/global_step_2720/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2720/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2720/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2720/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2720/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2720/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2720/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a1b66c4465bfe0d584f84bf93ec6e56732639289
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2720/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ac7e0e647d708611a6484ff43cb0e5c17e0942b1fb6941f2c2b2db805502836
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2720/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2720/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..17b60b36088d736af071fe54a36f965aad8bcd93
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2720/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07e1e1be287f63b40779d00ac785817511f7d483f99216f078cb5165a579fcb7
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2720/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2720/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2720/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2720/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2720/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2720/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2720/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2720/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2720/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2720/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2720/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2720/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2740/config.json b/deepscaler-1.5b-8k/actor/global_step_2740/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2740/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2740/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2740/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2740/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2740/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2740/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..4ec9d57560c523839d93fd400e675886429c8e53
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2740/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57be90524d0e488b8a24cdf79b3575065c0099ed4e790b6830d9dace443b06ad
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2740/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2740/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dd5a4f6a08b391b2de9259db28e1e87aadee554e
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2740/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:837cd48f045e3f5556a70440280751099be5865718ed12bcc086a5587761364e
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2740/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2740/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2740/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2740/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2740/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2740/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2740/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2740/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2740/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2740/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2740/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2740/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
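The chat_template above encodes the DeepSeek-R1 conversation protocol: the system prompt is emitted once after the BOS token, user turns are wrapped in <|User|>, assistant turns end with <|end▁of▁sentence|>, any <think> reasoning prefix is stripped from earlier assistant turns before re-encoding, and the generation prompt opens a fresh <think> block. A minimal sketch of how the template is typically consumed via transformers; the local checkpoint path is an assumption for illustration:

```python
from transformers import AutoTokenizer

# Hypothetical local path to the checkpoint directory added in this diff.
tokenizer = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2740")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]

# add_generation_prompt=True appends the '<|Assistant|><think>\n' suffix
# defined at the end of the template, so decoding starts inside a
# reasoning block.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|begin▁of▁sentence|>You are a helpful assistant.<|User|>What is 2 + 2?<|Assistant|><think>\n
```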
diff --git a/deepscaler-1.5b-8k/actor/global_step_2760/config.json b/deepscaler-1.5b-8k/actor/global_step_2760/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2760/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2760/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2760/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2760/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
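generation_config.json carries the sampling defaults that generate() falls back on when no explicit overrides are passed: nucleus sampling with temperature 0.6 and top_p 0.95, stopping on EOS token 151643. A hedged sketch, assuming the checkpoint has been pulled from LFS to a local path:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_2760"  # hypothetical local path
tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16)

inputs = tokenizer("1 + 1 =", return_tensors="pt")
# With no sampling arguments given, generate() reads do_sample=True,
# temperature=0.6 and top_p=0.95 from generation_config.json above.
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```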
diff --git a/deepscaler-1.5b-8k/actor/global_step_2760/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2760/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..db654c83e72cdf3fa58dff82fa8ea0282584a023
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2760/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea6a457eae892671f0ad9aa90b5a7542b2a368377b29080174d0193851d45800
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2760/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2760/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c380a400c9adab3196978ae6f0a583d25e05a8ae
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2760/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8dfdef4111c41a396c7f29cdd1efc49a517f8750503577450d5d084dcad37037
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2760/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2760/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2760/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
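The index's total_size of 7,108,352,000 bytes is the raw tensor payload (≈1.78 B float32 parameters, matching torch_dtype in config.json); the two shard files above are slightly larger because each safetensors file also carries a JSON header. The weight_map resolves any tensor name to its shard, which is how sharded checkpoints are loaded piecewise. A small sketch, under the assumption that safetensors is installed and the LFS shards are present locally:

```python
import json
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_2760"  # hypothetical local path
with open(f"{ckpt}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.21.mlp.gate_proj.weight"  # listed under shard 00001 above
shard = index["weight_map"][name]
with safe_open(f"{ckpt}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)

# gate_proj maps hidden_size 1536 -> intermediate_size 8960 per config.json.
print(shard, tuple(tensor.shape))  # model-00001-of-00002.safetensors (8960, 1536)
```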
diff --git a/deepscaler-1.5b-8k/actor/global_step_2760/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2760/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2760/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2760/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2760/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2760/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2760/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2760/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2760/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2780/config.json b/deepscaler-1.5b-8k/actor/global_step_2780/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2780/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2780/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2780/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2780/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2780/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2780/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b92e45d05d171cf6d13be2628957d2fd1aa28c9d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2780/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c93cbe2b8c5e27b535017b70b857902e72aea78082073b4b5a4fa44d991ca06
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2780/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2780/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b869ea71f9e57ce609c1345a531be11bfabb1c26
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2780/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bd6c127802d5b20e37d849dc38efc7637534e09a9b4ad6eb0e6bd7377b680ec
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2780/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2780/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2780/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
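
The `weight_map` above is the only lookup a loader needs: each tensor name maps to the shard file that stores it, so a partial load never has to open the other shard. Note that layer 21 straddles the shard boundary (its attention and gate/up projections sit in shard 1, its `down_proj` and norms in shard 2), which the per-tensor map encodes without special casing. A minimal sketch of resolving one tensor through the index, assuming a local checkout of one of the checkpoint directories added here:

```python
# Minimal sketch: resolving a tensor through model.safetensors.index.json.
# Assumes a local checkout of one of the checkpoint directories in this diff.
import json
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_2780"

with open(f"{ckpt}/model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

name = "model.layers.21.mlp.down_proj.weight"
shard = weight_map[name]  # "model-00002-of-00002.safetensors" per the map above
with safe_open(f"{ckpt}/{shard}", framework="pt") as st:
    w = st.get_tensor(name)
print(w.shape)  # torch.Size([1536, 8960]): hidden_size x intermediate_size
```
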
diff --git a/deepscaler-1.5b-8k/actor/global_step_2780/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2780/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2780/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2780/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2780/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2780/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
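
The three lines above are not the tokenizer itself but a Git LFS pointer: the repository stores only the blob's content address (`oid sha256:...`) and byte size, and `git lfs` fetches the actual 11.4 MB `tokenizer.json` on checkout, as declared in `.gitattributes`. A sketch of parsing such a pointer and checking a fetched blob against it (file paths here are hypothetical):

```python
# Sketch: parse a Git LFS pointer file and verify a fetched blob against it.
# File paths are hypothetical placeholders.
import hashlib

def parse_lfs_pointer(path):
    fields = dict(line.split(" ", 1) for line in open(path).read().splitlines())
    oid = fields["oid"].split(":", 1)[1]  # strip the "sha256:" prefix
    return oid, int(fields["size"])

def verify_blob(blob_path, oid, size):
    data = open(blob_path, "rb").read()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid

oid, size = parse_lfs_pointer("tokenizer.json.pointer")  # assumed filename
print(verify_blob("tokenizer.json", oid, size))          # True if intact
```
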
diff --git a/deepscaler-1.5b-8k/actor/global_step_2780/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2780/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2780/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
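
The `chat_template` field above is a Jinja template that `transformers` renders via `apply_chat_template`: it prepends the BOS token and any system prompt, wraps user turns in `<|User|>`, strips any `</think>` prefix from prior assistant turns, and, when a generation prompt is requested, ends with `<|Assistant|><think>\n` so the model opens a reasoning block. A minimal sketch, assuming a local checkout of this checkpoint directory:

```python
# Sketch: rendering the chat_template above with transformers.
# The path assumes a local checkout of this checkpoint directory.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2780")
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "What is 2 + 2?"}],
    tokenize=False,
    add_generation_prompt=True,  # appends the '<|Assistant|><think>\n' suffix
)
print(prompt)
# <|begin▁of▁sentence|><|User|>What is 2 + 2?<|Assistant|><think>
```
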
diff --git a/deepscaler-1.5b-8k/actor/global_step_280/config.json b/deepscaler-1.5b-8k/actor/global_step_280/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_280/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_280/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_280/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_280/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
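
These are fallback sampling settings: `generate()` reads `generation_config.json` when the caller passes no overrides, so the defaults here (sampling enabled, temperature 0.6, top-p 0.95) match DeepSeek's published recommendation for the R1 distills. A sketch, with paths assuming a local checkout and float32 matching the checkpoint's `torch_dtype`:

```python
# Sketch: generation_config.json is what model.generate() falls back to when
# no explicit sampling arguments are passed. Paths assume a local checkout.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_280"
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float32)

inputs = tok("Prove that the square root of 2 is irrational.", return_tensors="pt")
# With no overrides, this samples with temperature=0.6, top_p=0.95 as configured.
out = model.generate(**inputs, max_new_tokens=256)
print(tok.decode(out[0], skip_special_tokens=True))
```
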
diff --git a/deepscaler-1.5b-8k/actor/global_step_280/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_280/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..00309e116ee613f5b33005f211484f3e5d06e955
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_280/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2e186a44c95bb4dd2e9d988344fed1e0c1ad5a38424a85197eb5d59c4e8283f
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_280/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_280/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e08346a22a0f7fe04442fca70e9a46f250bbbbe1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_280/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e68091c1b7127870520366bacb5da2b2673c283b5ad8f3d0d753f060d50352db
+size 2111719976
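
A consistency check across these files: the `total_size` in the index that follows (7,108,352,000 bytes) is exactly the float32 footprint implied by the `config.json` fields above, while the two shard pointers above sum to slightly more (7,108,390,440 bytes); the ~38 kB difference is plausibly the per-file safetensors JSON headers, which `total_size` does not count. The parameter arithmetic, as a sketch:

```python
# Sketch: deriving the index's total_size from the config.json fields above.
hidden, inter, vocab = 1536, 8960, 151936
layers, heads, kv_heads = 28, 12, 2
head_dim = hidden // heads            # 128
kv_dim = kv_heads * head_dim          # 256 (GQA: 2 KV heads)

per_layer = (
    hidden * hidden + hidden          # q_proj weight + bias
    + 2 * (kv_dim * hidden + kv_dim)  # k_proj and v_proj, weights + biases
    + hidden * hidden                 # o_proj (no bias)
    + 3 * inter * hidden              # gate_proj, up_proj, down_proj
    + 2 * hidden                      # the two RMSNorm weights
)
# tie_word_embeddings is false, so embed_tokens and lm_head are separate.
params = 2 * vocab * hidden + layers * per_layer + hidden  # + final norm
assert params * 4 == 7_108_352_000    # torch_dtype float32 -> 4 bytes/param
print(f"{params:,} parameters")       # 1,777,088,000
```
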
diff --git a/deepscaler-1.5b-8k/actor/global_step_280/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_280/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_280/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_280/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_280/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_280/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_280/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_280/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_280/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_280/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_280/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_280/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2800/config.json b/deepscaler-1.5b-8k/actor/global_step_2800/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2800/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2800/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2800/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2800/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2800/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2800/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ea813cfac95d39957fff78b07ccebec1dd855191
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2800/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0f5b7a96b18cf507b06b59bcb7bde59713abdfa066bcd4587d4a483d4d76383
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2800/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2800/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..25d8ea7d77d5ce00000913798a300a98f2bbe52a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2800/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:022f0a32a1ae33ab65f5b024cec6335c71759771ea265265582dd360a3473ac0
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2800/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2800/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2800/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2800/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2800/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2800/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2800/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2800/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2800/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2800/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2800/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2800/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2820/config.json b/deepscaler-1.5b-8k/actor/global_step_2820/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2820/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2820/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2820/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2820/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2820/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2820/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..aff97072f889d5ed9258e0847cb0edcf7aa76332
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2820/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e15ee251ace4d7c61cf2f44f4f998128bb7373aed1cabc137cc3e0f16247e3e0
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2820/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2820/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e633052a6b4889e582ae7b5a699954053cb0064f
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2820/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e43eb28e72f0b3a195113a44a9f57c13baeb7554cb14db3870963a19886d017b
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2820/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2820/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2820/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
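Note: the weight map above is how loaders resolve each parameter to its shard. Layers 0-20 sit in shard 00001, layers 22-27 plus lm_head and the final norm in shard 00002, and layer 21 straddles the boundary (its layernorms and mlp.down_proj live in shard 00002, its remaining tensors in shard 00001). A minimal Python sketch of reading single tensors through the index, assuming the checkpoint directory has been fetched with its LFS objects resolved; the local path is illustrative:

import json
from pathlib import Path
from safetensors import safe_open

ckpt_dir = Path("deepscaler-1.5b-8k/actor/global_step_2820")  # illustrative local path
index = json.loads((ckpt_dir / "model.safetensors.index.json").read_text())
weight_map = index["weight_map"]

def load_tensor(name):
    # The index names the shard holding this tensor; layer 21 shows why the
    # lookup matters, since its tensors are split across both shards.
    shard = weight_map[name]
    with safe_open(str(ckpt_dir / shard), framework="pt") as f:
        return f.get_tensor(name)

gate = load_tensor("model.layers.21.mlp.gate_proj.weight")    # shard 00001
norm = load_tensor("model.layers.21.input_layernorm.weight")  # shard 00002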
diff --git a/deepscaler-1.5b-8k/actor/global_step_2820/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2820/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2820/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
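Note: pad_token here reuses <|end▁of▁sentence|> rather than a dedicated padding token, which is common for decoder-only checkpoints; for batched generation that usually implies left padding so real tokens stay right-aligned. A hedged sketch (local path illustrative):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2820")  # illustrative
assert tok.pad_token == tok.eos_token  # pad falls back to EOS, per the map above
tok.padding_side = "left"  # pad on the left for batched generation
batch = tok(["1+1=", "What is 2+2?"], padding=True, return_tensors="pt")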
diff --git a/deepscaler-1.5b-8k/actor/global_step_2820/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2820/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2820/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
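Note: tokenizer.json is stored as a Git LFS pointer, a three-line stub recording the spec version, the object's sha256, and its byte size, while the ~11 MB payload lives in LFS storage. A small sketch of parsing a pointer and checking a downloaded blob against it (paths illustrative):

import hashlib
from pathlib import Path

def parse_lfs_pointer(path):
    # Pointer format: "version <url>", "oid sha256:<hex>", "size <bytes>"
    fields = dict(line.split(" ", 1) for line in Path(path).read_text().splitlines())
    return fields["oid"].split(":", 1)[1], int(fields["size"])

def verify_blob(pointer_path, blob_path):
    oid, size = parse_lfs_pointer(pointer_path)
    blob = Path(blob_path).read_bytes()
    return len(blob) == size and hashlib.sha256(blob).hexdigest() == oid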
diff --git a/deepscaler-1.5b-8k/actor/global_step_2820/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2820/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2820/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
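Note: the chat_template above is a Jinja template. It emits the BOS token and any system prompt, wraps turns in <|User|>/<|Assistant|>, drops everything up to </think> from earlier assistant turns so stale reasoning is not fed back into the context, and, when add_generation_prompt is set, ends the prompt with <|Assistant|><think>\n to open a fresh reasoning block. A sketch of rendering it via transformers (local path illustrative):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2820")  # illustrative
messages = [{"role": "user", "content": "Solve 2x + 3 = 7."}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# Expected shape: "<|begin▁of▁sentence|><|User|>Solve 2x + 3 = 7.<|Assistant|><think>\n"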
diff --git a/deepscaler-1.5b-8k/actor/global_step_2840/config.json b/deepscaler-1.5b-8k/actor/global_step_2840/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2840/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
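Note: this config describes a 28-layer Qwen2 decoder with grouped-query attention: 12 query heads over a hidden size of 1536 gives a head dimension of 128, shared across only 2 key/value heads. A quick sketch of the derived geometry and the per-token KV-cache footprint at the float32 precision these checkpoints are saved in:

# Derive attention geometry and KV-cache size from the config fields above.
hidden_size, n_heads, n_kv_heads, n_layers = 1536, 12, 2, 28
head_dim = hidden_size // n_heads                              # 128
kv_bytes_per_token = 2 * n_kv_heads * head_dim * 4 * n_layers  # K and V, 4 bytes each (float32)
print(head_dim, kv_bytes_per_token)                            # 128, 57344 (~56 KiB per token)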
diff --git a/deepscaler-1.5b-8k/actor/global_step_2840/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2840/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2840/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
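Note: these generation defaults match DeepSeek-R1's recommended sampling settings (temperature 0.6, top-p 0.95, sampling enabled). generate() picks them up automatically from generation_config.json; the sketch below passes them explicitly for clarity (local path illustrative):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_2840"  # illustrative local path
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16)
inputs = tok.apply_chat_template([{"role": "user", "content": "What is 7 * 8?"}],
                                 add_generation_prompt=True, return_tensors="pt")
out = model.generate(inputs, do_sample=True, temperature=0.6, top_p=0.95, max_new_tokens=512)
print(tok.decode(out[0], skip_special_tokens=True))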
diff --git a/deepscaler-1.5b-8k/actor/global_step_2840/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2840/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..6f467aa6232980e326b7014cf0fce45e7eedcbbf
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2840/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:323f72c2d86f778bda73655922c960e37557411727dff0e05faf4d202d35a949
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2840/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2840/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..87d8691728a10c5ed4e75a5ec6140152d37f224c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2840/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b3afd9340f6bd43d14a921ac2437dfd662cee1dd25dadab00f181d69adb91d0
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2840/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2840/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2840/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2840/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2840/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2840/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2840/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2840/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2840/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2840/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2840/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2840/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2860/config.json b/deepscaler-1.5b-8k/actor/global_step_2860/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2860/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
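
Note: this config.json is byte-identical across every global_step_* checkpoint in the run; only the weights change. It pins the Qwen2 geometry: 28 layers, hidden size 1536, and 12 query heads sharing 2 key/value heads (grouped-query attention), so each head spans 1536 / 12 = 128 dimensions and 6 query heads share one KV head. A minimal sketch of reading those quantities back, assuming transformers is installed and the checkpoint directory has been pulled locally (the path is illustrative):

    from transformers import AutoConfig

    # Hypothetical local path; any global_step_* directory in this diff has the same config.
    cfg = AutoConfig.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2860")

    head_dim = cfg.hidden_size // cfg.num_attention_heads           # 1536 // 12 = 128
    gqa_ratio = cfg.num_attention_heads // cfg.num_key_value_heads  # 12 // 2 = 6
    print(cfg.model_type, cfg.num_hidden_layers, head_dim, gqa_ratio)  # qwen2 28 128 6
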
diff --git a/deepscaler-1.5b-8k/actor/global_step_2860/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2860/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2860/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
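
generation_config.json carries the sampling defaults that `generate` picks up automatically when no overrides are passed: nucleus sampling at temperature 0.6 and top-p 0.95, matching the usage recommendations for the R1-distilled base model. A minimal usage sketch, assuming the checkpoint has been pulled from LFS (path and prompt are illustrative):

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    path = "deepscaler-1.5b-8k/actor/global_step_2860"  # hypothetical local checkout
    tok = AutoTokenizer.from_pretrained(path)
    model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16, device_map="auto")

    messages = [{"role": "user", "content": "What is 17 * 23?"}]
    inputs = tok.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
    # do_sample=True, temperature=0.6, top_p=0.95 are read from generation_config.json.
    out = model.generate(inputs, max_new_tokens=512)
    print(tok.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
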
diff --git a/deepscaler-1.5b-8k/actor/global_step_2860/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2860/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dbc026bfd876149331e3b8aada1a178bfdc80580
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2860/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c99b83839c28293271022ac645e9c574cba5c47c0eaae9570962ac2eae0e8986
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2860/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2860/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..67ad26b5440c123594875faea22cbc090cc6263f
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2860/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b76a28ac9b5e199c0398af33c5babfbc87b05f68cf55bc7a1aad8ec1e02a6541
+size 2111719976
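
Both shard files are committed as Git LFS pointers: the three lines above stand in for ~5.0 GB and ~2.1 GB binaries, and `git lfs pull` swaps each pointer for the blob whose SHA-256 and byte size it records. A small sketch for verifying a downloaded shard against its pointer, hashing in chunks so a 5 GB file is never held in memory (paths are illustrative):

    import hashlib
    from pathlib import Path

    def parse_lfs_pointer(pointer_path):
        # A pointer is three "key value" lines: version, oid sha256:<hex>, size <bytes>.
        fields = dict(line.split(" ", 1) for line in Path(pointer_path).read_text().splitlines())
        return fields["oid"].split(":", 1)[1], int(fields["size"])

    def verify_blob(blob_path, pointer_path, chunk=1 << 20):
        oid, size = parse_lfs_pointer(pointer_path)
        h = hashlib.sha256()
        with open(blob_path, "rb") as f:
            for block in iter(lambda: f.read(chunk), b""):
                h.update(block)
        return Path(blob_path).stat().st_size == size and h.hexdigest() == oid
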
diff --git a/deepscaler-1.5b-8k/actor/global_step_2860/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2860/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2860/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
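
The index maps every tensor name to the shard that stores it; the split falls inside layer 21, whose attention and gate/up projections sit in shard 1 while its norms and down_proj land in shard 2. The metadata total of 7,108,352,000 bytes at float32 (4 bytes/param) works out to 1,777,088,000 parameters, as expected for this 1.5B-class model. A sketch that uses weight_map to load a single tensor without opening the other shard, assuming safetensors is installed and the blobs have been pulled (path is illustrative):

    import json
    from pathlib import Path
    from safetensors import safe_open

    ckpt = Path("deepscaler-1.5b-8k/actor/global_step_2860")  # hypothetical local path
    index = json.loads((ckpt / "model.safetensors.index.json").read_text())

    name = "model.layers.27.mlp.down_proj.weight"
    shard = index["weight_map"][name]  # -> "model-00002-of-00002.safetensors"
    with safe_open(str(ckpt / shard), framework="pt") as f:
        tensor = f.get_tensor(name)
    print(tensor.shape)  # torch.Size([1536, 8960])
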
diff --git a/deepscaler-1.5b-8k/actor/global_step_2860/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2860/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2860/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2860/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2860/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2860/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2860/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2860/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2860/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
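
The chat template above formats turns as <|User|>…<|Assistant|>…<|end▁of▁sentence|>, strips everything up to the last </think> from earlier assistant turns so prior reasoning traces are not replayed into context, and, when add_generation_prompt is set, ends the prompt with <|Assistant|><think>\n so the model resumes inside a fresh reasoning block. A quick rendering sketch (path and messages are illustrative; output shown per the template as restored above):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2860")  # hypothetical path
    msgs = [
        {"role": "user", "content": "Compute 2 + 2."},
        {"role": "assistant", "content": "<think>trivial</think>2 + 2 = 4."},
        {"role": "user", "content": "Now 3 + 3."},
    ]
    print(tok.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True))
    # <|begin▁of▁sentence|><|User|>Compute 2 + 2.<|Assistant|>2 + 2 = 4.<|end▁of▁sentence|><|User|>Now 3 + 3.<|Assistant|><think>
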
diff --git a/deepscaler-1.5b-8k/actor/global_step_2880/config.json b/deepscaler-1.5b-8k/actor/global_step_2880/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2880/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2880/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2880/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2880/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2880/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2880/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..116ec27528fedd761821189f2eb6d1f7c77aa389
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2880/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d331874a400dc1ffa867e00f9dc9f7ab9e802cffb01d22b4557ad7f26ddad8b
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2880/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2880/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..62254dc13fddbcabb57170965d8c8c93e38aad4a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2880/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93e3452e7697bdb8fd0fc58ad234720b8cbe2a664e8cce19a851e4dfbd0e8955
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2880/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2880/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2880/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
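
Note: the model.safetensors.index.json above maps every tensor name to the shard file that stores it, with metadata.total_size giving the combined tensor bytes. As a minimal sketch of what the index buys you (the checkpoint path is a placeholder, and transformers resolves all of this automatically), a single tensor can be read without loading the other shard:

import json
import os

from safetensors import safe_open

ckpt_dir = "deepscaler-1.5b-8k/actor/global_step_2880"  # placeholder path

# The index tells us which shard holds each tensor, so only that file is opened.
with open(os.path.join(ckpt_dir, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "model.norm.weight"
shard = index["weight_map"][name]  # "model-00002-of-00002.safetensors" per the index above
with safe_open(os.path.join(ckpt_dir, shard), framework="pt") as sf:
    tensor = sf.get_tensor(name)   # reads just this tensor, not the whole shard
print(name, tuple(tensor.shape))
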
diff --git a/deepscaler-1.5b-8k/actor/global_step_2880/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2880/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2880/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
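
Note: pad_token here is the same string as eos_token, which is common for decoder-only models pretrained without padding. A minimal sketch of the practical consequence for batched inference, assuming the placeholder checkpoint path below exists locally: left-pad so every sequence ends at the generation boundary, and let the attention mask hide the padding.

from transformers import AutoTokenizer

# Placeholder path; every global_step_* directory ships the same tokenizer files.
tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2880")

assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
tok.padding_side = "left"  # left-pad so generation starts right after the real tokens
batch = tok(["1+1=", "What is 2+2?"], return_tensors="pt", padding=True)
print(batch["input_ids"].shape, batch["attention_mask"][0])
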
diff --git a/deepscaler-1.5b-8k/actor/global_step_2880/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2880/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2880/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
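
Note: tokenizer.json is checked in as a Git LFS pointer — three lines recording the spec version, the sha256 of the real blob (oid), and its size in bytes. After git lfs pull replaces the pointer with the actual ~11 MB file, the download can be verified against the pointer's own values; a minimal sketch using the oid and size from this hunk (the path is a placeholder):

import hashlib
import os

path = "deepscaler-1.5b-8k/actor/global_step_2880/tokenizer.json"  # placeholder
expected_oid = "e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893"
expected_size = 11422778

assert os.path.getsize(path) == expected_size
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == expected_oid
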
diff --git a/deepscaler-1.5b-8k/actor/global_step_2880/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2880/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2880/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
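
Note: the chat_template above is the standard DeepSeek-R1 Jinja template. It emits the BOS token and any system prompt once, wraps turns in <|User|>/<|Assistant|>, drops everything up to </think> from earlier assistant turns so reasoning traces are not replayed into context, and, when add_generation_prompt is set, opens a fresh <think> block. A minimal usage sketch (checkpoint path is a placeholder):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2880")  # placeholder

messages = [{"role": "user", "content": "What is 7 * 8?"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Expected shape: <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>\n
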
diff --git a/deepscaler-1.5b-8k/actor/global_step_2900/config.json b/deepscaler-1.5b-8k/actor/global_step_2900/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2900/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
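
Note: these hyperparameters fix the checkpoint size exactly. A Qwen2 block uses grouped-query attention (12 query heads, 2 KV heads, head_dim = 1536 / 12 = 128, biases on Q/K/V only) plus a three-matrix SwiGLU MLP, and the embeddings are untied. A quick arithmetic check that reproduces the total_size recorded in this checkpoint's shard index below:

# Derive the float32 checkpoint size from config.json alone.
vocab, hidden, inter, layers = 151936, 1536, 8960, 28
heads, kv_heads = 12, 2
head_dim = hidden // heads    # 128
kv_dim = kv_heads * head_dim  # 256

attn = hidden * hidden + hidden         # q_proj weight + bias
attn += 2 * (hidden * kv_dim + kv_dim)  # k_proj and v_proj weights + biases
attn += hidden * hidden                 # o_proj (no bias)
mlp = 3 * hidden * inter                # gate, up, and down projections
norms = 2 * hidden                      # input + post-attention RMSNorm

total = layers * (attn + mlp + norms)   # all 28 transformer blocks
total += 2 * vocab * hidden             # embed_tokens + untied lm_head
total += hidden                         # final model.norm
print(total)      # 1777088000 parameters
print(total * 4)  # 7108352000 bytes of float32 -- the index's metadata.total_size

The two shard files below sum to slightly more (7,108,390,440 bytes) because each safetensors file also carries a small JSON header in addition to the raw tensor data.
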
diff --git a/deepscaler-1.5b-8k/actor/global_step_2900/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2900/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2900/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
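
Note: generation_config.json carries the sampling defaults recommended for the R1-distilled models — sampling enabled with temperature 0.6 and top_p 0.95 — and model.generate() picks them up automatically unless overridden per call. A minimal end-to-end sketch (the path, the bfloat16 cast, and max_new_tokens are placeholder choices, not anything the checkpoint mandates; config.json stores the weights in float32):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_2900"  # placeholder
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(
    ckpt, torch_dtype=torch.bfloat16, device_map="auto"  # cast down from float32 to save memory
)

prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 12 * 13."}],
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tok(prompt, return_tensors="pt").to(model.device)
# do_sample / temperature / top_p are read from generation_config.json.
out = model.generate(**inputs, max_new_tokens=512)
print(tok.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
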
diff --git a/deepscaler-1.5b-8k/actor/global_step_2900/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2900/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d7eb1928ea8962013a1468ab5ebb09bf464f2b3c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2900/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e971efe54338894a02eda9587a9b0287bcf54f8752bedf04ced752e834387dd
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2900/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2900/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5384c879c0030471ea945b904128f58f7fb9f47c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2900/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59b3852315d5e3544a50a461f080b1322d8da01defe7ba165873a9e96f38ad6b
+size 2111719976
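
Note: each global_step_* directory in this diff is a self-contained Hugging Face checkpoint (config, generation config, tokenizer files, two safetensors shards, and the shard index), so selecting a training step is just path selection. A small sketch for picking the newest step, assuming the directory layout shown in these paths:

import os
import re

actor_dir = "deepscaler-1.5b-8k/actor"  # placeholder root

# Sort numerically, not lexically, so global_step_2900 outranks global_step_300.
steps = sorted(
    int(m.group(1))
    for d in os.listdir(actor_dir)
    if (m := re.fullmatch(r"global_step_(\d+)", d))
)
latest = os.path.join(actor_dir, f"global_step_{steps[-1]}")
print(latest)  # e.g. deepscaler-1.5b-8k/actor/global_step_2920
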
diff --git a/deepscaler-1.5b-8k/actor/global_step_2900/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2900/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2900/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2900/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2900/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2900/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2900/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2900/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2900/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2900/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2900/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2900/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2920/config.json b/deepscaler-1.5b-8k/actor/global_step_2920/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2920/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2920/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2920/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2920/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2920/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2920/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..247f0e279da4527a26f951a284c4d82f6b4cbd34
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2920/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be8277de279e2d3895cccc286116a60e2741f712ad480c0a3a1bc6ab1e5173c3
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2920/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2920/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3c6e08b3357b7972dccee69bd64f4fa32b760665
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2920/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b0469ae8108d602e1a15080dd66bf35dd971539c0b7ea59246e8593def7207b
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2920/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2920/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2920/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2920/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2920/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2920/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2920/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2920/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2920/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2920/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2920/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2920/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2940/config.json b/deepscaler-1.5b-8k/actor/global_step_2940/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2940/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2940/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2940/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2940/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2940/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2940/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2f6f3f90f2a1587c0d49e68ef85f43714fdfab90
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2940/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:595ebfd33d272029867e5a663fc21c682f19d85918dc42760ff76f617ac730f2
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2940/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2940/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d0fb7ebf6a4aab8cf1d66371dc27224d46a7b7cb
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2940/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fd8fd0a70d1eeaee9d882438a7182b33eed15c2a3be54521ba6f5854f556593
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2940/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2940/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2940/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2940/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2940/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2940/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2940/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2940/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2940/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2940/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2940/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2940/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2960/config.json b/deepscaler-1.5b-8k/actor/global_step_2960/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2960/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2960/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2960/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2960/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2960/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2960/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2564b16c9126ac9ea491bcfd86e9da5b5912997a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2960/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10fe8e7175fbcd57c2eb7fafbdc96ca7e3d84f4ede372abc93816ee2fa503078
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2960/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2960/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b3eecf0e4e1d8725824dafd06055ae4fca64a6aa
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2960/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3bc082dad0a8949bb4b2d10f637a1daf30cee7acd4ed88fa2d1fb8cc34512a86
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_2960/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2960/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2960/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2960/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2960/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2960/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2960/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2960/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2960/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2960/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2960/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2960/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2980/config.json b/deepscaler-1.5b-8k/actor/global_step_2980/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2980/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2980/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_2980/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2980/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_2980/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2980/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..71efd20965478f456fa239c326471c61c41ef6de
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2980/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:707b942ed22acec60cded154fac7d420769c58cc2ac7578b282b0f83b545dd6b
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_2980/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_2980/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ae3cc2dfcafaef6c398a6581cb31054b50ab5f4d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2980/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9de430b551316f85e6c099ffcf8ea12a641944178f033056bc714bc8d1b9d78b
+size 2111719976
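
The .safetensors entries in this diff are git-lfs pointer files rather than the weights themselves: three lines giving the LFS spec version, a sha256 object id, and the blob size (about 5.0 GB plus 2.1 GB per checkpoint). A sketch of parsing a pointer and verifying a fetched blob against it (the helper names are illustrative, not part of any library):

```python
# Sketch: parse a git-lfs pointer file and verify a downloaded blob against it.
import hashlib

def read_lfs_pointer(path):
    """Return the version/oid/size fields of a git-lfs pointer file."""
    fields = dict(line.split(" ", 1) for line in open(path).read().splitlines())
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

def verify_blob(blob_path, pointer):
    """Stream the blob and compare its sha256 to the pointer's oid."""
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == pointer["oid"]
```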
diff --git a/deepscaler-1.5b-8k/actor/global_step_2980/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_2980/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2980/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
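
model.safetensors.index.json maps every tensor name to the shard that stores it: layers 0-20 (plus part of layer 21's MLP) live in shard 1, and the remaining layers together with lm_head in shard 2, for a total_size of 7,108,352,000 bytes of float32 weights. The index makes it possible to read a single tensor without opening the other shard, sketched below (hypothetical local path):

```python
# Sketch: resolve one tensor through the index and read only its shard.
import json
import os
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_2980"
index = json.load(open(os.path.join(ckpt, "model.safetensors.index.json")))

name = "model.layers.27.mlp.down_proj.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00002.safetensors"

with safe_open(os.path.join(ckpt, shard), framework="pt") as f:
    w = f.get_tensor(name)
print(w.shape, w.dtype)  # torch.Size([1536, 8960]) torch.float32
```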
diff --git a/deepscaler-1.5b-8k/actor/global_step_2980/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_2980/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2980/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
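
special_tokens_map.json aliases the pad token to EOS (`<|end▁of▁sentence|>`, id 151643), the usual choice for decoder-only models without a dedicated pad token. One practical consequence, sketched below: batched generation should pad on the left so that EOS-as-pad never sits between a prompt and its continuation.

```python
# Sketch: with pad_token == eos_token, pad on the left for batched generation.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_2980")
assert tok.pad_token_id == tok.eos_token_id == 151643

tok.padding_side = "left"  # keep pads away from the generation boundary
batch = tok(["Compute 2 + 2.", "Factor 91."], padding=True, return_tensors="pt")
print(batch["input_ids"].shape, batch["attention_mask"][0])
```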
diff --git a/deepscaler-1.5b-8k/actor/global_step_2980/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_2980/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2980/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_2980/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_2980/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_2980/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
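
One detail of the template worth noting: when prior assistant turns are re-encoded, everything up to `</think>` is split off, so past chain-of-thought is not fed back to the model and only the final answer survives in the history. A sketch (assuming the checkpoint is available locally):

```python
# Sketch: the chat template drops reasoning before '</think>' from past turns.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_300")
messages = [
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "<think>2 + 2 = 4.</think>The answer is 4."},
    {"role": "user", "content": "And doubled?"},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
assert "<think>2 + 2 = 4." not in prompt  # reasoning stripped from history
assert "The answer is 4." in prompt      # final answer kept
```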
diff --git a/deepscaler-1.5b-8k/actor/global_step_300/config.json b/deepscaler-1.5b-8k/actor/global_step_300/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_300/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_300/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_300/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_300/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_300/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_300/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e97b9844ff7ff99e042cd5c7ef2fa2d7b22f302a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_300/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3550b232488e9866a3ecf8b667199f170ba9ebe57d16a52bd43e482de0cd22ff
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_300/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_300/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f5a7941d76d068a065e112ce222a89e93d0d940c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_300/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87e769ba8e3ac3d7e6045cfc631c2884c3e94cdbd34131832927aae08d1801df
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_300/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_300/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_300/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_300/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_300/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_300/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_300/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_300/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_300/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_300/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_300/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_300/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_3000/config.json b/deepscaler-1.5b-8k/actor/global_step_3000/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_3000/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_3000/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_3000/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_3000/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_3000/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_3000/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d02e98a2688c26387865551b9ace29317cea453c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_3000/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3308a14b9f4a54b20b17e8d1d3444a1f050e325b6424c539656527e9af6b434c
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_3000/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_3000/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..48b82b693fcbf352212ac089aa6130cc41bf8410
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_3000/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67ab12bc8af55f9919fd2494bf4761fae7e75fc0e4354b42b54981b9ec9cf939
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_3000/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_3000/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_3000/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_3000/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_3000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_3000/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_3000/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_3000/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_3000/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_3000/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_3000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_3000/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_320/config.json b/deepscaler-1.5b-8k/actor/global_step_320/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_320/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_320/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_320/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_320/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_320/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_320/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3a27a61af03f7a1b17a3f0737f875bbf50d10ca2
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_320/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91919a03df82d70f3a4b90624944732754b5c893ba31ba306845ad7a4d7b2fad
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_320/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_320/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..05934b24e2b3323d88a7af147937ce177e6deace
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_320/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f5fe746d6e890fdded6c6976f3a5bb6c73afbe1f999ff68732a49bca7081072
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_320/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_320/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_320/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_320/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_320/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_320/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_320/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_320/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_320/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_320/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_320/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_320/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_340/config.json b/deepscaler-1.5b-8k/actor/global_step_340/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_340/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_340/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_340/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_340/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_340/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_340/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..648bd15ab3076699889920c136ed9bf80c97b050
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_340/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c1a21de9c86386d965ef60b21d2e76daeb2840886769dd7b6ae9aab3d62e808
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_340/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_340/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0a61312723c13bcd8a3af6340151bec8d410ece0
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_340/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:412a232d3d7f860aad3f97a188a5dd50d3bfb199c1d6b4fb4f167db36c9f03fe
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_340/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_340/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_340/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_340/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_340/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_340/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_340/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_340/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_340/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_340/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_340/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_340/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_360/config.json b/deepscaler-1.5b-8k/actor/global_step_360/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_360/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_360/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_360/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_360/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_360/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_360/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..db56e690251cb9016c95a747e1457276537f4df4
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_360/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e497048beef1acd80c66981d344f630930d179248b2cb0fd212b017c74f172b
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_360/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_360/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..079758bdae1d54b5d64563624b5f8bdb75af9ce4
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_360/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3bb7588cbd7bf20c9bc8d028f534b1bf8ceaa35f5360b859eff4c1567d90b1cf
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_360/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_360/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_360/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_360/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_360/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_360/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_360/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_360/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_360/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_360/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_360/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_360/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_380/config.json b/deepscaler-1.5b-8k/actor/global_step_380/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_380/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_380/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_380/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_380/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_380/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_380/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..739a1df1d63fbe22889c0c2404b1e77411330c10
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_380/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65e7654bc6c6e697d8eb441a6805d6ca5b43f93fd5327e78f920b1fe83de8e25
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_380/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_380/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b99927ed2b6bcf61ba51256893c019ae52d935c2
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_380/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48a4170a21c368e71f5b176e11039d313754c65905efa8d2047ea784b3115453
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_380/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_380/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_380/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_380/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_380/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_380/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_380/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_380/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_380/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_380/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_380/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_380/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_40/config.json b/deepscaler-1.5b-8k/actor/global_step_40/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_40/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_40/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_40/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_40/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_40/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_40/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c615611b080c228e97ba43eced185a0e4f3009a9
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_40/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e52589af3cb47f2a2e12ac32b97727ac7e22a81d8528cb7b8c5e1946fbdc73f1
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_40/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_40/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3ec8fe63733dccdafb8facfca0c1084e9533cbba
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_40/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7bf8d94532b82f42250453fecde60f43b8d9b917a4db392c17cc85421670333
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_40/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_40/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_40/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_40/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_40/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_40/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_40/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_40/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_40/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_40/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_40/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_40/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_400/config.json b/deepscaler-1.5b-8k/actor/global_step_400/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_400/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_400/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_400/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_400/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_400/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_400/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..18234098903f4ff178d1c593a8e97f48c191885f
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_400/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdb8b5a67d4bb721ca186c43cfd11491d675166a92f6ada23360cdf1f1b37a0a
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_400/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_400/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b8470448ad2454252b946635f05d93edec67f8e6
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_400/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:feb26674f933eca50e0c4778e7e05248f984b2f38eb2a3bc197c192f04b91362
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_400/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_400/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_400/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_400/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_400/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_400/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_400/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_400/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_400/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_400/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_400/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_400/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_420/config.json b/deepscaler-1.5b-8k/actor/global_step_420/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_420/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_420/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_420/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_420/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_420/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_420/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..196e8911591fd79ccf40dd558171e5f01ad5ff6c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_420/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ace7f9ac4dc63a64e4c6a7203ad4f0b42f5fc2e7d995333622dd80efae410d44
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_420/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_420/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2b233d7f1d30906c95e3eba60b8f400c2b1a29f1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_420/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83b746efb82ebfc737d1b0ad1ffafc3ece7c4833c8bfad89d445c8229d65c3dc
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_420/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_420/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_420/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_420/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_420/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_420/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_420/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_420/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_420/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_420/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_420/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_420/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+      "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+      "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+      "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+      "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_440/config.json b/deepscaler-1.5b-8k/actor/global_step_440/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_440/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_440/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_440/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_440/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_440/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_440/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2d718ec6b7a7d42360e70a994275b9fd35158568
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_440/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6e9df6a71966f0f25c886022f9af35f79da1878c8861dbece6a5ffb30b9cdd8
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_440/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_440/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..be88a8b002c454a5ad961cecb3c331ecdeb7715c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_440/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5cf2c633aba1fc316ed0a202cf6852da3c79a88d9679f987d0eaebcd324107d9
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_440/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_440/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_440/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_440/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_440/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_440/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_440/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_440/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_440/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_440/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_440/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_440/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_460/config.json b/deepscaler-1.5b-8k/actor/global_step_460/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_460/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_460/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_460/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_460/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_460/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_460/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c3c26a049e25e851ee2fa094e1c7f207734f5708
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_460/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:144a650b3592a1e710c4c11ac4bbb9d9986e6a296b6a22796d3d77f880368726
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_460/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_460/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..953da46a89890685da1cfd4296a12ca99cfcfa20
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_460/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c3ad59b552c948178a7ab7232f94a496d1b5fa4a5ba2a959bbe095ae5711139a
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_460/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_460/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_460/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_460/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_460/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_460/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_460/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_460/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_460/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_460/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_460/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_460/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_480/config.json b/deepscaler-1.5b-8k/actor/global_step_480/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_480/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_480/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_480/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_480/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_480/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_480/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b8e46d2867f25c52eb5fdfccc451921e34a248d3
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_480/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4d6b1667c487e92b695ca481ac20a4be79a474fc751acb4e8ad59d079d75b0c
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_480/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_480/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..cf2fa6cab9a17c92f75ebfaf8c15d7ccd0eeb875
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_480/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00dddfa5235ab0534272c031efb40a33758085776d6881a8b0b8961cf11d22dd
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_480/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_480/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_480/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
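
Note: the `weight_map` above is the standard Hugging Face sharded-checkpoint index. Each parameter name points at the shard file that stores it, and here only layer 21 straddles the two shards (its layernorms and `down_proj` land in shard 2 while the rest stays in shard 1). A minimal sketch of how such an index is consumed, assuming the checkpoint directory has been pulled locally with `git lfs` (the path is illustrative):

```python
import json
from pathlib import Path

from safetensors.torch import load_file  # returns a dict of name -> tensor

ckpt_dir = Path("deepscaler-1.5b-8k/actor/global_step_480")  # illustrative path

# The index maps each parameter name to the shard that stores it.
index = json.loads((ckpt_dir / "model.safetensors.index.json").read_text())
weight_map = index["weight_map"]

state_dict = {}
for shard_name in sorted(set(weight_map.values())):
    # Each shard is a plain safetensors file; merging them rebuilds the model.
    state_dict.update(load_file(ckpt_dir / shard_name))

# Sanity check: every tensor named in the index should now be present.
assert set(weight_map) <= set(state_dict)
print(f"loaded {len(state_dict)} tensors; "
      f"index total_size = {index['metadata']['total_size']} bytes")
```

`AutoModelForCausalLM.from_pretrained` does this resolution automatically; the sketch only shows what the index file encodes.
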
diff --git a/deepscaler-1.5b-8k/actor/global_step_480/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_480/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_480/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_480/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_480/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_480/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_480/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_480/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_480/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
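
Note: the `chat_template` above is what `transformers` executes in `apply_chat_template`; with `add_generation_prompt=True` it closes the prompt with `<|Assistant|><think>\n`, cueing the model to open with its reasoning trace. A minimal sketch, assuming the checkpoint directory is available locally (path and prompt are illustrative):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_480")

messages = [{"role": "user", "content": "What is 7 * 8?"}]

# tokenize=False returns the rendered string so the template output is visible;
# add_generation_prompt=True appends the assistant/think opener defined above.
prompt = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```
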
diff --git a/deepscaler-1.5b-8k/actor/global_step_500/config.json b/deepscaler-1.5b-8k/actor/global_step_500/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_500/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
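
These hyperparameters tie out exactly with the `total_size` of 7,108,352,000 bytes recorded in each `model.safetensors.index.json`. A quick check, assuming the standard Qwen2 layout (Q/K/V projections with biases, untied embeddings, RMSNorm weights only):

```python
# Parameter count implied by config.json above.
hidden, inter, layers, vocab = 1536, 8960, 28, 151936
kv_dim = 2 * (hidden // 12)               # 2 KV heads x head_dim 128 = 256

attn = (hidden * hidden + hidden          # q_proj weight + bias
        + 2 * (kv_dim * hidden + kv_dim)  # k_proj, v_proj weights + biases
        + hidden * hidden)                # o_proj (no bias)
mlp = 3 * hidden * inter                  # gate_proj, up_proj, down_proj
norms = 2 * hidden                        # input + post-attention RMSNorm

per_layer = attn + mlp + norms
total = layers * per_layer + 2 * vocab * hidden + hidden  # + embed, lm_head, final norm

print(total)       # 1,777,088,000 parameters
print(total * 4)   # 7,108,352,000 bytes in float32 -- matches the index metadata
```

1,777,088,000 parameters at 4 bytes each (the checkpoint is saved in `float32`, per `torch_dtype`) gives exactly the 7,108,352,000 bytes in the index, and roughly matches the 4,996,670,464 + 2,111,719,976 bytes of the two LFS-tracked shards.
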
diff --git a/deepscaler-1.5b-8k/actor/global_step_500/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_500/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_500/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
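
Note: `generate()` picks this file up automatically, so sampling defaults to `do_sample=True`, `temperature=0.6`, `top_p=0.95` unless overridden per call. A minimal end-to-end sketch; the bfloat16 cast is an optional memory saving and not something the repo specifies, and the path is illustrative:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_500"  # illustrative local path
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)

inputs = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 12 + 30."}],
    add_generation_prompt=True, return_tensors="pt",
)
# generation_config.json supplies do_sample=True, temperature=0.6, top_p=0.95.
out = model.generate(inputs, max_new_tokens=256)
print(tok.decode(out[0], skip_special_tokens=True))
```
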
diff --git a/deepscaler-1.5b-8k/actor/global_step_500/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_500/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9655dda1433c927d98ee5be7d2d742544f248bd8
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_500/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3da25fa1fbf1032291f3d7f6d7652ad7787e10c13739cb689fd021e2fa27cb76
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_500/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_500/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5a7240d86c4a6d14c3bbba32e888b1f2b723ba66
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_500/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9743b8092ebd36c17beb3619a3517cc4f64ff37567d62ee1fdb159fcf2c975e7
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_500/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_500/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_500/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_500/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_500/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_500/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_500/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_500/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_500/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_500/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_500/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_500/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_520/config.json b/deepscaler-1.5b-8k/actor/global_step_520/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_520/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_520/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_520/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_520/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_520/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_520/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..26782e88bbe9f8a5acf325a19616ea31479f9d2b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_520/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a26bea3ca392083fb378ee83c83a74f2fff865aaee25c1126c41b87f87502f5d
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_520/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_520/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..7e02892d06176be09257418652686c6e657af060
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_520/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab3f26938de389361d571eca7bbde684216975f3427979761b671156e39b0128
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_520/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_520/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_520/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_520/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_520/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_520/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_520/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_520/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_520/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_520/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_520/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_520/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_540/config.json b/deepscaler-1.5b-8k/actor/global_step_540/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_540/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_540/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_540/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_540/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_540/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_540/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..01928f861010d0aeedbcb7b75b36f81b4ccfcc11
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_540/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:356afddac35313e020cecea56a8c348b1932a3fce41db6dcb7cf901a5a109bd6
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_540/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_540/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2efeb352523f77828348ba935c9975fcba1b0029
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_540/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c55aab77e3eb2854c2bdd63562efeea2f41f6a42ee6bea82e14ebf9c20538bf
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_540/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_540/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_540/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_540/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_540/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_540/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_540/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_540/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_540/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_540/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_540/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_540/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_560/config.json b/deepscaler-1.5b-8k/actor/global_step_560/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_560/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_560/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_560/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_560/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_560/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_560/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..54988f5e874de156e80715c92f6b265d3fa63f6f
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_560/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95d42e7453c2bffb8edd16762ffaf8cccd10548c226639dcc37215dd35191508
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_560/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_560/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9219a6439b0969f36402f6b2f56d13a5a8a5bb83
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_560/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48f62782b19e3465c9fca8d6586f2f9d49485fa565c83a9bd37b14dfedf05e42
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_560/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_560/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_560/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
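
The weight_map above is the standard Hugging Face sharding index: it records, for every tensor, which .safetensors shard stores it, so a loader can open only the shards it needs. As a hedged sketch (assuming the safetensors package and a hypothetical local checkout of one checkpoint directory from this repo), the shards can also be resolved by hand:

    import json
    from pathlib import Path
    from safetensors.torch import load_file

    # Hypothetical local path to one checkpoint directory from this repo.
    ckpt = Path("deepscaler-1.5b-8k/actor/global_step_560")

    # The index maps each tensor name to the shard file that stores it.
    index = json.loads((ckpt / "model.safetensors.index.json").read_text())
    weight_map = index["weight_map"]

    # Load each shard once, then assemble the full state dict.
    state_dict = {}
    for shard_name in sorted(set(weight_map.values())):
        state_dict.update(load_file(str(ckpt / shard_name)))

    assert set(state_dict) == set(weight_map)
    print(f"{len(state_dict)} tensors, "
          f"{sum(t.numel() for t in state_dict.values()):,} parameters")

In practice transformers' AutoModelForCausalLM.from_pretrained(ckpt) performs this index-then-shards resolution automatically.
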
diff --git a/deepscaler-1.5b-8k/actor/global_step_560/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_560/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_560/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_560/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_560/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_560/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
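
The tokenizer.json blobs (and the model shards below) are committed as Git LFS pointers: three "key value" lines giving the spec version, the SHA-256 of the real blob, and its size in bytes. Running git lfs pull replaces each pointer with the actual file; as a minimal sketch, a pointer can also be parsed directly:

    # Minimal sketch: parse a Git LFS pointer file into a dict.
    # Expects the three "key value" lines shown in the diff above.
    def parse_lfs_pointer(text: str) -> dict:
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        return {
            "version": fields["version"],
            "oid": fields["oid"].removeprefix("sha256:"),
            "size": int(fields["size"]),
        }

    pointer = """version https://git-lfs.github.com/spec/v1
    oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
    size 11422778"""
    print(parse_lfs_pointer(pointer))  # ~11.4 MB tokenizer.json
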
diff --git a/deepscaler-1.5b-8k/actor/global_step_560/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_560/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_560/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+      "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+      "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+      "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+      "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
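
This tokenizer_config.json carries the DeepSeek-R1 chat template as a single Jinja string, including the step that splits a previous assistant turn on '</think>' so earlier reasoning traces are dropped before the conversation is re-encoded, and a generation prompt that ends in '<|Assistant|><think>\n' so decoding starts inside a reasoning block. A hedged usage sketch (assuming transformers is installed and the checkpoint directory is local):

    from transformers import AutoTokenizer

    # Hypothetical local checkpoint path from this repo.
    tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_560")

    messages = [{"role": "user", "content": "What is 12 * 17?"}]

    # With this template, the rendered prompt ends in "<|Assistant|><think>\n",
    # so the model's first generated tokens land inside the reasoning block.
    prompt = tok.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    print(prompt)
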
diff --git a/deepscaler-1.5b-8k/actor/global_step_580/config.json b/deepscaler-1.5b-8k/actor/global_step_580/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_580/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
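
The config pins every checkpoint to the same Qwen2 1.5B geometry: 28 layers, hidden size 1536, 12 query heads sharing 2 key/value heads (grouped-query attention, head_dim 128), an 8960-wide SwiGLU MLP, and untied input/output embeddings. As a sanity check, these fields reproduce the total_size recorded in the accompanying model.safetensors.index.json exactly (1,777,088,000 float32 parameters x 4 bytes = 7,108,352,000 bytes); a minimal sketch of the arithmetic:

    # Parameter count implied by the config fields above (float32 weights).
    h, inter, layers, vocab = 1536, 8960, 28, 151936
    kv_dim = 2 * (h // 12)  # 2 KV heads * head_dim 128 = 256

    embed = vocab * h                    # input embeddings
    lm_head = vocab * h                  # untied output projection
    attn = (h * h + h                    # q_proj weight + bias
            + 2 * (kv_dim * h + kv_dim)  # k_proj and v_proj weights + biases
            + h * h)                     # o_proj weight (no bias)
    mlp = 3 * h * inter                  # gate, up, down projections
    norms = 2 * h                        # input + post-attention RMSNorm
    per_layer = attn + mlp + norms

    total = embed + lm_head + layers * per_layer + h  # + final model.norm
    assert total * 4 == 7_108_352_000  # matches the index's total_size
    print(f"{total:,} parameters")     # 1,777,088,000
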
diff --git a/deepscaler-1.5b-8k/actor/global_step_580/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_580/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_580/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
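
generation_config.json bakes the recommended R1-distill sampling defaults (do_sample with temperature 0.6, top_p 0.95) into the checkpoint, so transformers' generate() applies them without extra arguments. A hedged sketch, again assuming a hypothetical local checkout:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Hypothetical local checkpoint path from this repo.
    ckpt = "deepscaler-1.5b-8k/actor/global_step_580"
    tok = AutoTokenizer.from_pretrained(ckpt)
    model = AutoModelForCausalLM.from_pretrained(ckpt)

    messages = [{"role": "user", "content": "What is 12 * 17?"}]
    inputs = tok.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    # do_sample, temperature=0.6, and top_p=0.95 are read from
    # generation_config.json; no need to pass them explicitly.
    out = model.generate(inputs, max_new_tokens=256)
    print(tok.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
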
diff --git a/deepscaler-1.5b-8k/actor/global_step_580/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_580/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..106fda057305b352d715123876e5fdd7abdbcb20
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_580/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af8cd3225f4d4ccd5bf32cdd5a3a39141e3a0d6db4c69d84656a5a2691373d70
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_580/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_580/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d8033ef706689628d997450a52ddbf1ede8170a3
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_580/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f0375eed68589fab51cda35fd1f1b67ddf763d065db1ec6d6eb255aea40af01
+size 2111719976
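
Note on the shard sizes: 4,996,670,464 + 2,111,719,976 = 7,108,390,440 bytes, slightly more than the 7,108,352,000-byte total_size recorded in the index below. The roughly 38 KB difference is consistent with the per-file safetensors JSON headers, which total_size (tensor data only) does not count.
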
diff --git a/deepscaler-1.5b-8k/actor/global_step_580/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_580/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_580/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_580/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_580/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_580/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_580/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_580/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_580/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_580/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_580/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_580/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+      "content": "<think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+      "content": "</think>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+      "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+      "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_60/config.json b/deepscaler-1.5b-8k/actor/global_step_60/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_60/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_60/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_60/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_60/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_60/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_60/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..eac2c4764df348fb8e33483374da909ac820a528
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_60/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea19eff7935897dc59d13ac8798d8013f237fc71139d0d335057ee6240b2e410
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_60/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_60/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a4c65f1338dc97121cdde04172ebe79492351528
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_60/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b4481ebc9226877c09c8d76aee061316527b7be77a3c68cefc1383aa296376e
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_60/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_60/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_60/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_60/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_60/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_60/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_60/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_60/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_60/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
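
The three-line files like the one above are Git LFS pointer stubs rather than the binary payloads themselves: `version` names the pointer spec, `oid` carries the SHA-256 of the real object, and `size` is its byte count. A minimal parsing sketch, assuming a hypothetical local checkout of this repo (the path is illustrative):

```python
# Sketch: parse a Git LFS pointer file into its version/oid/size fields.
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict[str, str]:
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")  # each line is "<key> <value>"
        fields[key] = value
    return fields

ptr = parse_lfs_pointer("deepscaler-1.5b-8k/actor/global_step_60/tokenizer.json")
assert ptr["oid"].startswith("sha256:") and int(ptr["size"]) > 0
```
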
diff --git a/deepscaler-1.5b-8k/actor/global_step_60/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_60/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_60/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
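
Since this tokenizer_config ships the full DeepSeek-R1-style chat template, a hedged sketch of rendering it with transformers' `apply_chat_template`; the checkpoint path is an assumption about a local checkout:

```python
# Sketch: render the chat_template defined in the tokenizer_config above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_60")
messages = [{"role": "user", "content": "Compute 12 * 13."}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# add_generation_prompt=True makes the template close with '<|Assistant|><think>\n',
# so generation starts inside the model's reasoning block.
print(prompt)
```
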
diff --git a/deepscaler-1.5b-8k/actor/global_step_600/config.json b/deepscaler-1.5b-8k/actor/global_step_600/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_600/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
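
A quick consistency sketch of the attention geometry implied by this config (pure arithmetic over the values above, no external assumptions):

```python
# Derived attention dimensions from the config above.
hidden_size, n_heads, n_kv_heads = 1536, 12, 2
head_dim = hidden_size // n_heads   # 128
q_proj_out = n_heads * head_dim     # 1536, matches hidden_size
kv_proj_out = n_kv_heads * head_dim # 256: grouped-query attention,
                                    # 6 query heads share each KV head
assert q_proj_out == hidden_size
```
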
diff --git a/deepscaler-1.5b-8k/actor/global_step_600/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_600/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_600/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_600/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_600/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3ed5ba91741985d1bcb2c7e1581a984aa75e3443
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_600/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34b72b62831fb1a82e66efc03a379cbb76ce2f0ffd1f74a89e677726fad53776
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_600/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_600/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e633e3a556faf0d1cae12dfedcd6ee63e3dde5f0
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_600/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7460f2ba4a3a6079b28d34817f73a84739898ef2df82a6df0931eaa9b9877edc
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_600/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_600/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_600/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_600/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_600/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_600/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_600/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_600/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_600/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_600/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_600/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_600/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_620/config.json b/deepscaler-1.5b-8k/actor/global_step_620/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_620/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_620/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_620/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_620/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_620/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_620/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9d02d574c732232eb7471e356d300920045e7025
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_620/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a78f4155020c16594f3481046242eca1b7bc5bb4e48902c5cdc5d828bbe3ae74
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_620/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_620/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1eeed4b0ddcaa609f20237912aae85b34dc3b6f7
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_620/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1141f9bc53d621ba45f23ed7867f4a762ecf6f46752104624293c24657993d0
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_620/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_620/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_620/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_620/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_620/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_620/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_620/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_620/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_620/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_620/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_620/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_620/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_640/config.json b/deepscaler-1.5b-8k/actor/global_step_640/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_640/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_640/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_640/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_640/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_640/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_640/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3997d17af700dd18a42b5cf3624804386e4302e7
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_640/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6d01f58a90993fb2c5024abae283b191bc204c79e5484dfe2b5e883e55c93fb
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_640/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_640/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ee42a271e710a99d5e6ee464bb3f5414c4da85bc
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_640/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3df1eeb531ecadc74b6a4eabbc1e322dfb78de9a347c8c0b9860b5dc6ffcd0f
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_640/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_640/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_640/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_640/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_640/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_640/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_640/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_640/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_640/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_640/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_640/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_640/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_660/config.json b/deepscaler-1.5b-8k/actor/global_step_660/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_660/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_660/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_660/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_660/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_660/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_660/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8872aa441feb829393f64da2cd0966e7d290f4dc
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_660/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c064b7be013a667c53db3111088361489f278b8631ee4a8749f6fd059dfb621
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_660/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_660/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..29727ac9f44831be18185217b94bda176d76ec95
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_660/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04281e523cc2e795b09373948e41feb9016499fd7256c604676371dfad82078a
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_660/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_660/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_660/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_660/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_660/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_660/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_660/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_660/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_660/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
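
The three-line stub above is a Git LFS pointer, not the tokenizer itself: the real ~11 MB tokenizer.json lives in LFS storage, addressed by the sha256 oid and checked out through the filters declared in .gitattributes. A minimal sketch of reading such a pointer (plain Python, no LFS client assumed; the path is illustrative):

```python
# Parse a Git LFS pointer file: three "key value" lines (version / oid / size).
def parse_lfs_pointer(path):
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    algo, _, digest = fields["oid"].partition(":")
    return {"spec": fields["version"], "algo": algo,
            "digest": digest, "size": int(fields["size"])}

# parse_lfs_pointer("deepscaler-1.5b-8k/actor/global_step_660/tokenizer.json")
# -> {'spec': 'https://git-lfs.github.com/spec/v1', 'algo': 'sha256',
#     'digest': 'e20ddafc659b...', 'size': 11422778}
```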
diff --git a/deepscaler-1.5b-8k/actor/global_step_660/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_660/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_660/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
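
The chat_template above is the standard DeepSeek-R1 Jinja template: it prepends the BOS token and any system prompt, wraps turns in <|User|>/<|Assistant|> markers, strips everything up to </think> from prior assistant turns, and (when a generation prompt is requested) opens a fresh <think> block. A minimal rendering sketch, assuming the `transformers` library and this checkpoint directory on disk:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_660")
messages = [{"role": "user", "content": "What is 7 * 8?"}]
# add_generation_prompt=True appends the trailing '<|Assistant|><think>\n'
# so sampling starts inside the reasoning block.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# '<|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>\n'
```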
diff --git a/deepscaler-1.5b-8k/actor/global_step_680/config.json b/deepscaler-1.5b-8k/actor/global_step_680/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_680/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
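
As a sanity check, the architecture in this config accounts exactly for the 7,108,352,000-byte total_size recorded in the safetensors index below, at 4 bytes per parameter (torch_dtype is float32). Plain arithmetic, no dependencies:

```python
h, L, ffn, vocab = 1536, 28, 8960, 151936   # hidden, layers, intermediate, vocab
kv = 2 * (h // 12)                          # 2 KV heads x 128-dim heads (GQA)
attn = 2*h*h + 2*kv*h + h + 2*kv            # q/o and k/v weights + q/k/v biases
mlp = 3 * h * ffn                           # gate, up, down projections
layer = attn + mlp + 2*h                    # plus two RMSNorm weights
total = L*layer + 2*vocab*h + h             # embed, untied lm_head, final norm
assert total == 1_777_088_000 and 4 * total == 7_108_352_000
```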
diff --git a/deepscaler-1.5b-8k/actor/global_step_680/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_680/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_680/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
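
generate() picks this file up automatically, so decoding defaults to nucleus sampling with temperature 0.6 and top_p 0.95, the settings DeepSeek recommends for the R1 distills. A minimal usage sketch (assumes `transformers` and `torch`; the bfloat16 cast is illustrative):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_680"
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.bfloat16)
inputs = tok("What is 7 * 8?", return_tensors="pt")
# Sampling parameters come from generation_config.json above.
out = model.generate(**inputs, max_new_tokens=256)
print(tok.decode(out[0], skip_special_tokens=True))
```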
diff --git a/deepscaler-1.5b-8k/actor/global_step_680/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_680/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0506b7f035a13b700a7c5c4976c94d6588fa07fb
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_680/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20989dc92a016c63dc4ce3e66ed3f1be82127c7412832c7a090737a9f0a4a22
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_680/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_680/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9e982c2dbff01279617cfd8ba97b72ccf1d8bac3
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_680/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93003a1acf78194dbe8cf04dc2886fd508d8d786ec8192ce8ad9e942f1826660
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_680/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_680/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_680/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
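
The weight_map lets a loader open only the shard that holds a given tensor; note that layer 21 straddles the split, with its attention and gate/up projections in shard 1 but its norms and down_proj in shard 2. A minimal lookup sketch, assuming the `safetensors` package and both shards fetched from LFS:

```python
import json, os
from safetensors import safe_open

ckpt = "deepscaler-1.5b-8k/actor/global_step_680"
with open(os.path.join(ckpt, "model.safetensors.index.json")) as f:
    index = json.load(f)
name = "model.layers.21.mlp.down_proj.weight"
shard = index["weight_map"][name]        # -> 'model-00002-of-00002.safetensors'
with safe_open(os.path.join(ckpt, shard), framework="pt") as f:
    w = f.get_tensor(name)               # reads just this tensor, not the shard
print(w.shape)                           # torch.Size([1536, 8960])
```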
diff --git a/deepscaler-1.5b-8k/actor/global_step_680/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_680/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_680/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_680/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_680/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_680/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_680/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_680/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_680/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_700/config.json b/deepscaler-1.5b-8k/actor/global_step_700/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_700/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_700/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_700/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_700/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_700/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_700/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..732bb8d8089a8d3c194445fb939fe5e1d7125b5d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_700/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5c048f9d0598b35f6ce90d6958e89f0bc6b02e65e8bd7594f4495fc4e5176aa
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_700/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_700/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..082b20d13bae65469a490b98b26cbaad1cddff2d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_700/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ba64319d79a7923c93f01378189f84298f20130829a73907e08ee1dc99584c2
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_700/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_700/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_700/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
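
The weight_map above is what lets a loader open only the shard that holds a given tensor; note that layer 21 is split across both shards (its gate/up projections sit in shard 1, its down_proj and norms in shard 2), because sharding follows cumulative byte size rather than layer boundaries. A minimal lookup sketch in Python, assuming the safetensors package and a local copy of this checkpoint (the directory path is illustrative):

    import json
    from safetensors import safe_open

    ckpt = "deepscaler-1.5b-8k/actor/global_step_700"  # illustrative local path

    # The index maps every tensor name to the shard file that stores it.
    with open(f"{ckpt}/model.safetensors.index.json") as f:
        index = json.load(f)

    name = "model.layers.21.mlp.down_proj.weight"
    shard = index["weight_map"][name]   # -> "model-00002-of-00002.safetensors"

    # Open just that shard and read the single tensor; the other shard
    # is never touched, which keeps peak memory at one tensor's worth.
    with safe_open(f"{ckpt}/{shard}", framework="pt") as f:
        tensor = f.get_tensor(name)
    print(shard, tuple(tensor.shape))
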
diff --git a/deepscaler-1.5b-8k/actor/global_step_700/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_700/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_700/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
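
This special_tokens_map.json aliases pad_token to the same string as eos_token, since the base model ships no dedicated padding token; padded positions therefore have to be excluded through the attention mask rather than by a distinct pad id. A quick check, assuming transformers is installed (path illustrative):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_700")

    # pad and eos resolve to the same token, so training labels at padded
    # positions are conventionally set to -100 instead of relying on pad id.
    assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
    print(tok.bos_token_id, tok.eos_token_id)  # 151646, 151643 per this checkpoint
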
diff --git a/deepscaler-1.5b-8k/actor/global_step_700/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_700/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_700/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
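
What is committed for tokenizer.json is not the ~11 MB file itself but a three-line Git LFS pointer: a spec version, the SHA-256 of the real blob, and its byte size; the blob is stored out of band and fetched on checkout. Since global_step_700 and global_step_720 below carry the same oid, LFS stores that tokenizer exactly once. A minimal pointer parser, as a plain-Python sketch (no Git required):

    def parse_lfs_pointer(text: str) -> dict:
        # Each pointer line is "key value"; split on the first space only.
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        return {
            "version": fields["version"],
            "sha256": fields["oid"].removeprefix("sha256:"),
            "size": int(fields["size"]),
        }

    ptr = parse_lfs_pointer(
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893\n"
        "size 11422778\n"
    )
    print(ptr["sha256"][:12], ptr["size"])  # e20ddafc659b 11422778
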
diff --git a/deepscaler-1.5b-8k/actor/global_step_700/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_700/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_700/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
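
The chat_template above is the DeepSeek-R1 Jinja template: it places the system prompt right after the BOS marker, wraps turns in <|User|>/<|Assistant|>, keeps only the text after the final </think> in assistant turns replayed from history, and, with add_generation_prompt, ends the prompt with <|Assistant|><think>\n so generation starts inside the reasoning block. A usage sketch with transformers (path illustrative):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_700")
    messages = [
        {"role": "system", "content": "You are a careful math assistant."},
        {"role": "user", "content": "Compute 12 * 13."},
    ]
    # Renders the template without tokenizing, so the markers are visible.
    prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    print(prompt)
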
diff --git a/deepscaler-1.5b-8k/actor/global_step_720/config.json b/deepscaler-1.5b-8k/actor/global_step_720/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_720/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
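
These fields pin down the architecture: 28 Qwen2 blocks with grouped-query attention (12 query heads, 2 key/value heads, head_dim = 1536 / 12 = 128), an 8960-wide gated MLP, untied embeddings, and float32 weights. The parameter count follows directly from them; a back-of-envelope sketch that reproduces the safetensors index's total_size exactly:

    # Parameter count from the config.json fields above (float32 checkpoint).
    hidden, inter, vocab, layers = 1536, 8960, 151936, 28
    heads, kv_heads = 12, 2
    head_dim = hidden // heads  # 128

    attn = (hidden * hidden + hidden                                    # q_proj weight + bias
            + 2 * (hidden * kv_heads * head_dim + kv_heads * head_dim)  # k/v weights + biases
            + hidden * hidden)                                          # o_proj (no bias)
    mlp = 3 * hidden * inter        # gate, up, down projections
    norms = 2 * hidden              # input + post-attention RMSNorm
    per_layer = attn + mlp + norms

    total = layers * per_layer + 2 * vocab * hidden + hidden  # + embed, lm_head, final norm
    print(total)      # 1_777_088_000 parameters
    print(total * 4)  # 7_108_352_000 bytes == "total_size" in the safetensors index
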
diff --git a/deepscaler-1.5b-8k/actor/global_step_720/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_720/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_720/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
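
The generation defaults stored with the checkpoint are sampling at temperature 0.6 with top_p 0.95, the settings DeepSeek recommends for the R1 distills. transformers picks this file up automatically, so a plain generate() call already samples with them; a sketch (path illustrative, and the float32 weights need roughly 7 GB of memory):

    from transformers import AutoModelForCausalLM, AutoTokenizer

    ckpt = "deepscaler-1.5b-8k/actor/global_step_720"  # illustrative local path
    tok = AutoTokenizer.from_pretrained(ckpt)
    model = AutoModelForCausalLM.from_pretrained(ckpt)

    inputs = tok("Prove that 17 is prime.", return_tensors="pt")
    # generation_config.json supplies do_sample=True, temperature=0.6,
    # top_p=0.95, eos_token_id=151643 without any explicit arguments here.
    out = model.generate(**inputs, max_new_tokens=256)
    print(tok.decode(out[0], skip_special_tokens=True))
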
diff --git a/deepscaler-1.5b-8k/actor/global_step_720/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_720/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..bd219484b6232737148cad8361505fe8fadf936a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_720/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8a1537b19822aeebd557228aba09e892422824894ca1e347bba86dd0cc0b22b
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_720/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_720/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dfedb0806190039960a70b0244036b4d2f9d6119
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_720/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c50d9fe28881788c92aaa61a38e1366ef570d1c172bcf6c560237e2098c85154
+size 2111719976
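
A quick consistency check on the two shard pointers: 4,996,670,464 + 2,111,719,976 = 7,108,390,440 bytes, which is 38,440 bytes more than the 7,108,352,000 reported as total_size in the index below; the surplus is the JSON header that each .safetensors file stores ahead of its raw tensor bytes.
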
diff --git a/deepscaler-1.5b-8k/actor/global_step_720/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_720/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_720/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
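
Aside: the weight map above routes every tensor name to one of the checkpoint's two safetensors shards. A minimal sketch of how a loader resolves a single tensor through this index (assuming the directory layout shown in this diff and the published `safetensors` library):

    import json, os
    from safetensors import safe_open

    ckpt_dir = "deepscaler-1.5b-8k/actor/global_step_720"  # directory from this diff

    # The index maps each tensor name to the shard file that stores it.
    with open(os.path.join(ckpt_dir, "model.safetensors.index.json")) as f:
        index = json.load(f)

    def load_tensor(name):
        shard = index["weight_map"][name]   # e.g. "model-00002-of-00002.safetensors"
        with safe_open(os.path.join(ckpt_dir, shard), framework="pt") as f:
            return f.get_tensor(name)

    norm = load_tensor("model.norm.weight")  # lives in shard 2 per the map above
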
diff --git a/deepscaler-1.5b-8k/actor/global_step_720/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_720/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_720/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
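
Aside: note that `pad_token` is deliberately aliased to the eos token, a common choice for decoder-only models shipped without a dedicated padding token. A quick sanity check, assuming the checkpoint directory from this diff and the standard `transformers` API:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_720")
    assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
    assert tok.bos_token == "<|begin▁of▁sentence|>"
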
diff --git a/deepscaler-1.5b-8k/actor/global_step_720/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_720/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_720/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
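
Aside: tokenizer.json is committed as a Git LFS pointer, three lines carrying the spec version, the sha256 of the real blob, and its byte size; the ~11 MB file itself lives in LFS storage. A small sketch parsing such a pointer, under the assumption it follows the git-lfs v1 pointer format shown above:

    def parse_lfs_pointer(text: str) -> dict:
        # Each pointer line is "key value"; the oid is prefixed with its hash algorithm.
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        algo, digest = fields["oid"].split(":", 1)
        return {"version": fields["version"], "algo": algo,
                "sha256": digest, "size": int(fields["size"])}

    ptr = parse_lfs_pointer(
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893\n"
        "size 11422778\n")
    assert ptr["size"] == 11_422_778
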
diff --git a/deepscaler-1.5b-8k/actor/global_step_720/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_720/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_720/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
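
Aside: the `chat_template` above wraps a conversation in the DeepSeek `<|User|>`/`<|Assistant|>` markers, strips any `</think>` reasoning prefix from prior assistant turns when re-serializing history, and, with `add_generation_prompt=True`, ends the prompt with `<|Assistant|><think>\n` so generation starts inside a reasoning block. A minimal sketch, assuming this checkpoint directory and the standard `transformers` API:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_720")
    prompt = tok.apply_chat_template(
        [{"role": "user", "content": "What is 7 * 8?"}],
        tokenize=False,
        add_generation_prompt=True,  # appends the assistant header from the template
    )
    # prompt starts with the BOS token and ends with '<|Assistant|><think>\n'
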
diff --git a/deepscaler-1.5b-8k/actor/global_step_740/config.json b/deepscaler-1.5b-8k/actor/global_step_740/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_740/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
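
Aside: a few numbers implied by this config are worth spelling out: 12 query heads over a hidden size of 1536 give a head dimension of 128, and 2 key/value heads mean grouped-query attention with 6 query heads sharing each KV head. A sketch with the values hard-coded from the JSON above:

    hidden_size, n_heads, n_kv_heads = 1536, 12, 2
    head_dim = hidden_size // n_heads        # 128
    gqa_group = n_heads // n_kv_heads        # 6 query heads share each KV head
    kv_proj_width = n_kv_heads * head_dim    # 256-wide k_proj/v_proj weights
    assert (head_dim, gqa_group, kv_proj_width) == (128, 6, 256)
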
diff --git a/deepscaler-1.5b-8k/actor/global_step_740/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_740/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_740/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
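
Aside: these defaults (sampling on, temperature 0.6, top-p 0.95) are what `model.generate` will use unless overridden at the call site. A sketch, assuming the checkpoint directory from this diff:

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    ckpt = "deepscaler-1.5b-8k/actor/global_step_740"
    tok = AutoTokenizer.from_pretrained(ckpt)
    model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float32)

    inputs = tok("Prove that the square root of 2 is irrational.", return_tensors="pt")
    # do_sample / temperature / top_p come from generation_config.json above.
    out = model.generate(**inputs, max_new_tokens=256)
    print(tok.decode(out[0], skip_special_tokens=True))
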
diff --git a/deepscaler-1.5b-8k/actor/global_step_740/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_740/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..48e0b62da969248d77a292d63c333623fd2d467c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_740/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8f598338804ccac87956d9d409cfae3ae5b18971109056b1bf3581935e327a6
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_740/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_740/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c2ca057687351031f1bcebf029ce0e41448884c8
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_740/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed7739ab3062382230ee0a1ea54f8ae569b2b4fa91692c21f954d217cb952cf8
+size 2111719976
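
Aside: after `git lfs pull`, each shard should hash back to the oid recorded in its pointer. A verification sketch (paths and digest taken from the pointers above):

    import hashlib

    def sha256_of(path: str, chunk: int = 1 << 20) -> str:
        h = hashlib.sha256()
        with open(path, "rb") as f:
            while block := f.read(chunk):
                h.update(block)
        return h.hexdigest()

    shard = "deepscaler-1.5b-8k/actor/global_step_740/model-00002-of-00002.safetensors"
    expected = "ed7739ab3062382230ee0a1ea54f8ae569b2b4fa91692c21f954d217cb952cf8"
    assert sha256_of(shard) == expected  # oid from the LFS pointer above
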
diff --git a/deepscaler-1.5b-8k/actor/global_step_740/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_740/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_740/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
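
Aside: with 28 hidden layers and 12 tensors per layer (two layernorms, three MLP projections, four attention weight matrices, three q/k/v biases), plus embeddings, lm_head, and the final norm, the weight map should hold 28 * 12 + 3 = 339 entries. A sketch that re-derives this from the index, assuming the directory layout in this diff:

    import json
    from collections import Counter

    with open("deepscaler-1.5b-8k/actor/global_step_740/model.safetensors.index.json") as f:
        wm = json.load(f)["weight_map"]

    per_layer = 12  # 2 layernorms + 3 MLP weights + 4 attn weights + 3 attn biases
    assert len(wm) == 28 * per_layer + 3  # + embed_tokens, lm_head, final norm = 339
    print(Counter(wm.values()))           # tensors per shard; the split falls inside layer 21
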
diff --git a/deepscaler-1.5b-8k/actor/global_step_740/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_740/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_740/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_740/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_740/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_740/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_740/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_740/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_740/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
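
Aside: the tokenizer files are byte-identical across checkpoints (tokenizer.json's LFS oid e20ddafc... appears at both step 720 and step 740), so Git stores a single object no matter how many steps are committed. A quick local check, assuming both directories have been checked out:

    import filecmp

    a = "deepscaler-1.5b-8k/actor/global_step_720/tokenizer_config.json"
    b = "deepscaler-1.5b-8k/actor/global_step_740/tokenizer_config.json"
    assert filecmp.cmp(a, b, shallow=False)  # identical bytes, identical git blob
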
diff --git a/deepscaler-1.5b-8k/actor/global_step_760/config.json b/deepscaler-1.5b-8k/actor/global_step_760/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_760/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_760/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_760/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_760/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_760/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_760/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..9589522643d7cf755c8213c1cd30c6c7da4c6513
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_760/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bd25e68e26698e1e5d81d2b3b820104366e7640d4e92cbc35ad919be63fe88b
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_760/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_760/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f1e8f4091a381143f06e8fd470929bb7d7689ac3
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_760/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:596d37d220572ded0a0ca603d703ae22f78152080099636d9c007ff53af5f561
+size 2111719976
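
Aside: since every global_step_* directory is a complete, self-contained HF checkpoint, sweeping the training trajectory is just a loop over directories. A sketch (the `evaluate` call is hypothetical, standing in for whatever benchmark harness is used):

    from pathlib import Path
    from transformers import AutoModelForCausalLM

    actor = Path("deepscaler-1.5b-8k/actor")
    for step_dir in sorted(actor.glob("global_step_*"),
                           key=lambda p: int(p.name.split("_")[-1])):
        model = AutoModelForCausalLM.from_pretrained(step_dir)
        # evaluate(model)  # hypothetical: plug in your benchmark here
        print(step_dir.name, "loaded with", model.num_parameters(), "parameters")
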
diff --git a/deepscaler-1.5b-8k/actor/global_step_760/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_760/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_760/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_760/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_760/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_760/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_760/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_760/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_760/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_760/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_760/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_760/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_780/config.json b/deepscaler-1.5b-8k/actor/global_step_780/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_780/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_780/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_780/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_780/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_780/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_780/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..0a26eb496df19c4dcd5c3265491ecd30543c2682
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_780/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:168a565df28341a74da55f066ea040687dfc7e49a4bbdf219d4fda8478f2b170
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_780/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_780/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..1c5fc3e3fd8002f098ad53f5a1fdd7a5f8c25c14
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_780/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e8cd0c23fa135fbb05486f9d9b415ea2661be6f1361e65d3b66e61778bb8046
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_780/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_780/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_780/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_780/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_780/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_780/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
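
Note that pad_token is aliased to the EOS token in this map, which is typical for decoder-only checkpoints trained without a dedicated padding token. A quick check of that property (a sketch, assuming `transformers` is installed and the checkpoint directory is available locally):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_780")

# pad and eos resolve to the same string and the same id (151643, matching
# pad_token_id / eos_token_id in this checkpoint's config.json).
assert tok.pad_token == tok.eos_token == "<|end▁of▁sentence|>"
assert tok.pad_token_id == tok.eos_token_id == 151643
```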
diff --git a/deepscaler-1.5b-8k/actor/global_step_780/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_780/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_780/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
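
As with the model shards, tokenizer.json is checked in as a Git LFS pointer: the diff carries only the spec version, the SHA-256 object id, and the byte size, while the ~11 MB blob itself lives in LFS storage. A minimal, stdlib-only sketch of parsing such a pointer (the helper name is hypothetical, not part of this repo):

```python
def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; split on the first space.
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893\n"
    "size 11422778\n"
)
assert pointer["oid"].startswith("sha256:")
assert int(pointer["size"]) == 11422778
```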
diff --git a/deepscaler-1.5b-8k/actor/global_step_780/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_780/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_780/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
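
The chat template above wraps turns in <|User|>/<|Assistant|> markers, strips any earlier `</think>` reasoning from assistant history, and, when a generation prompt is requested, ends the string with `<|Assistant|><think>\n` so decoding starts inside a reasoning block. A sketch of rendering it (assumes `transformers` and a local copy of this checkpoint):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_780")

messages = [{"role": "user", "content": "What is 7 * 8?"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# Expected shape of the rendered string:
#   <|begin▁of▁sentence|><|User|>What is 7 * 8?<|Assistant|><think>\n
print(prompt)
```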
diff --git a/deepscaler-1.5b-8k/actor/global_step_80/config.json b/deepscaler-1.5b-8k/actor/global_step_80/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_80/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
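
This config pins the DeepSeek-R1-Distill-Qwen-1.5B architecture: 28 Qwen2 layers, hidden size 1536, and grouped-query attention with 12 query heads sharing 2 KV heads. Two sanity checks that follow directly from these fields (plain arithmetic, nothing beyond the JSON above):

```python
hidden_size = 1536
num_attention_heads = 12
num_key_value_heads = 2

head_dim = hidden_size // num_attention_heads            # 128
gqa_group = num_attention_heads // num_key_value_heads   # 6 query heads per KV head
assert head_dim == 128 and gqa_group == 6

# torch_dtype is float32, so the safetensors index's total_size of
# 7108352000 bytes corresponds to 7108352000 / 4 = 1,777,088,000 parameters.
print(7_108_352_000 // 4)  # 1777088000
```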
diff --git a/deepscaler-1.5b-8k/actor/global_step_80/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_80/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_80/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
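
These defaults (sampling on, temperature 0.6, top-p 0.95) are applied automatically by `model.generate` when loading from the checkpoint directory. A sketch of running one completion under them (assumes `transformers` and `torch`; the path refers to the directory added above):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = "deepscaler-1.5b-8k/actor/global_step_80"
tok = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.float32)

messages = [{"role": "user", "content": "Compute 12 * 13."}]
input_ids = tok.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")

# generation_config.json is picked up from the directory: do_sample=True,
# temperature=0.6, top_p=0.95, eos_token_id=151643.
out = model.generate(input_ids, max_new_tokens=256)
print(tok.decode(out[0][input_ids.shape[-1]:], skip_special_tokens=True))
```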
diff --git a/deepscaler-1.5b-8k/actor/global_step_80/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_80/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..e9b67516a6b336d5c4a0224a6e07fa7c99e080af
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_80/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:258dc62806b871219f51f6135cbd487c758358c922cade2286644ea99711047a
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_80/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_80/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8604d8dcccd4a1eb660469e72b9121eb8b28efd8
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_80/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0c5448a6bc0029e298ebc93ddd012291d7730b611d8f8b1d2d70342eeca8894
+size 2111719976
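
The two shard pointers together account for 4996670464 + 2111719976 = 7108390440 bytes on disk, slightly more than the index's total_size of 7108352000 because each safetensors file also carries its own header. A downloaded shard can be checked against its pointer's oid (stdlib-only sketch; the helper name is hypothetical):

```python
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    # Stream the file in 1 MiB chunks to avoid loading 2+ GB into memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "a0c5448a6bc0029e298ebc93ddd012291d7730b611d8f8b1d2d70342eeca8894"
actual = sha256_of("deepscaler-1.5b-8k/actor/global_step_80/model-00002-of-00002.safetensors")
assert actual == expected, "shard does not match its LFS pointer"
```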
diff --git a/deepscaler-1.5b-8k/actor/global_step_80/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_80/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_80/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
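
The weight map shows the shard boundary falls inside layer 21 rather than on a layer boundary: that layer's gate_proj/up_proj and all attention projections stay in shard 1, while its down_proj and both layer norms spill into shard 2. A stdlib-only sketch for inspecting the split (assumes a local copy of the checkpoint):

```python
import json
from collections import Counter

with open("deepscaler-1.5b-8k/actor/global_step_80/model.safetensors.index.json") as f:
    index = json.load(f)

print(Counter(index["weight_map"].values()))  # tensor count per shard

# Layer 21 straddles the two files:
for name, shard in sorted(index["weight_map"].items()):
    if ".layers.21." in name:
        print(f"{name} -> {shard}")
```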
diff --git a/deepscaler-1.5b-8k/actor/global_step_80/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_80/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_80/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_80/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_80/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_80/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_80/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_80/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_80/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_800/config.json b/deepscaler-1.5b-8k/actor/global_step_800/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_800/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_800/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_800/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_800/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_800/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_800/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..94124aa9d584285e15ff693ec627c838d54d556d
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_800/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dafef1d3bf58f94f0ccf345b8ef2d55f0a36260bde0dc3bfc29a371b0e8b307a
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_800/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_800/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..c8f211cf55c06b06e8cf1b44ec66e93e10516e18
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_800/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abde041c599ed33e6ce5bd25621b4cf1db51980a979db4459dc645eb6956f2b3
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_800/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_800/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_800/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
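
The weight_map above is the sharded-checkpoint index that Hugging Face loaders consult: each parameter name points at the shard file holding it, so a reader opens only the shard it needs. Note that layer 21 straddles the boundary (its gate_proj/up_proj sit in shard 1 while its layernorms and down_proj sit in shard 2): sharding follows a byte budget, not layer boundaries. A minimal sketch of a lookup, assuming the safetensors package and an LFS-pulled checkpoint at this illustrative path:

```python
# Minimal sketch: resolving one tensor to its shard via the index above.
# The checkpoint path is illustrative; any sharded safetensors directory works.
import json
from safetensors import safe_open

ckpt_dir = "deepscaler-1.5b-8k/actor/global_step_800"
with open(f"{ckpt_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.norm.weight"
shard = index["weight_map"][name]   # -> "model-00002-of-00002.safetensors"
with safe_open(f"{ckpt_dir}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)     # reads just this tensor, not the whole shard
print(shard, tuple(tensor.shape))
```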
diff --git a/deepscaler-1.5b-8k/actor/global_step_800/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_800/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_800/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_800/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_800/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_800/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
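
This tokenizer.json entry is a Git LFS pointer, not the tokenizer itself: its three lines record the spec version, the sha256 of the real object, and its size in bytes (11,422,778). After `git lfs pull`, the fetched file can be checked against the pointer. A minimal stdlib-only sketch, with the oid, size, and path taken from this diff:

```python
# Minimal sketch: verifying an LFS-fetched file against its pointer (stdlib only).
import hashlib
import os

path = "deepscaler-1.5b-8k/actor/global_step_800/tokenizer.json"
expected_oid = "e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893"
expected_size = 11422778

assert os.path.getsize(path) == expected_size, "size mismatch: file may still be a pointer"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == expected_oid, "sha256 mismatch against the LFS pointer"
```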
diff --git a/deepscaler-1.5b-8k/actor/global_step_800/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_800/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_800/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
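
The chat_template above is the Jinja program that apply_chat_template executes: it hoists the system message to the front, wraps turns in <|User|>/<|Assistant|> markers, strips everything up to the final </think> from stored assistant turns so earlier reasoning is not re-fed, and, when add_generation_prompt is set, ends the prompt with an <|Assistant|> header. A minimal sketch, assuming the LFS-tracked tokenizer files have been pulled for this checkpoint:

```python
# Minimal sketch: rendering a prompt with this checkpoint's chat template.
# Assumes the LFS-tracked tokenizer files have been pulled locally.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_800")
messages = [{"role": "user", "content": "What is 12 * 17?"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# roughly: <|begin▁of▁sentence|><|User|>What is 12 * 17?<|Assistant|><think>\n
```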
diff --git a/deepscaler-1.5b-8k/actor/global_step_820/config.json b/deepscaler-1.5b-8k/actor/global_step_820/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_820/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
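
This config pins the DeepSeek-R1-Distill-Qwen-1.5B geometry: 28 layers, hidden size 1536, 12 query heads over 2 key/value heads (grouped-query attention), an 8960-wide SwiGLU MLP, and untied embeddings over a 151,936-token vocabulary. In float32, those numbers should reproduce the 7,108,352,000-byte total_size recorded in the shard index below; a quick check:

```python
# Minimal sketch: parameter count implied by config.json, checked against the
# shard index metadata (float32 -> 4 bytes per parameter).
hidden, inter, layers = 1536, 8960, 28
heads, kv_heads, vocab = 12, 2, 151936
head_dim = hidden // heads          # 128
kv_dim = kv_heads * head_dim        # 256 (grouped-query attention)

attn = (hidden * hidden + hidden            # q_proj weight + bias
        + 2 * (hidden * kv_dim + kv_dim)    # k_proj/v_proj weights + biases
        + hidden * hidden)                  # o_proj weight (no bias)
mlp = 3 * hidden * inter                    # gate_proj, up_proj, down_proj
norms = 2 * hidden                          # input + post-attention RMSNorm

total = vocab * hidden                      # model.embed_tokens
total += layers * (attn + mlp + norms)
total += hidden                             # model.norm
total += vocab * hidden                     # lm_head (tie_word_embeddings: false)

assert total * 4 == 7108352000              # matches "total_size" exactly
print(total)                                # 1777088000 (~1.78B parameters)
```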
diff --git a/deepscaler-1.5b-8k/actor/global_step_820/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_820/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_820/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
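
generation_config.json fixes the decoding defaults shipped with the R1 distills: sampling enabled with temperature 0.6 and top_p 0.95. transformers picks these up automatically when the checkpoint directory is loaded; a minimal sketch with illustrative paths:

```python
# Minimal sketch: the checkpoint's decoding defaults picked up by generate().
# Paths are illustrative and assume the LFS weights have been pulled.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_820"
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float32)

inputs = tok("Prove that the sum of two even numbers is even.", return_tensors="pt")
# do_sample=True, temperature=0.6, top_p=0.95 come from generation_config.json
out = model.generate(**inputs, max_new_tokens=256)
print(tok.decode(out[0], skip_special_tokens=True))
```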
diff --git a/deepscaler-1.5b-8k/actor/global_step_820/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_820/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..a7cb9ad11f0287c4ecac6181a52687e0ba6e7c58
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_820/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a6bb23e92813bd87d5db98c3f9694c271e15ce0db584fa995a83e4fc736af3d
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_820/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_820/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..abe74e910e7e3c3366016aa800d139916fcab989
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_820/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:550337b2af3dd1c28f4f3b913eafd2d51a3c4d3ea3df7a756e72686b83d4b0e3
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_820/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_820/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_820/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_820/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_820/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_820/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_820/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_820/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_820/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_820/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_820/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_820/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_840/config.json b/deepscaler-1.5b-8k/actor/global_step_840/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_840/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_840/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_840/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_840/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_840/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_840/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..8efabf27a5f89647acf9c03e10d2a9be940e1735
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_840/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:762d98ab8e5502ab066ab551fd72b273cd972170a08458b2855ae1a27d8fd0ec
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_840/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_840/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..198007361d90e1217016df96e8571c1d726af847
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_840/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e23dceb4e74590d1ff3027130cc445ee6968450acaada1858c67daa36634ad4
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_840/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_840/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_840/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_840/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_840/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_840/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_840/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_840/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_840/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_840/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_840/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_840/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_860/config.json b/deepscaler-1.5b-8k/actor/global_step_860/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_860/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_860/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_860/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_860/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_860/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_860/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ba3ba8c243388463932c3085383382dd337698aa
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_860/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a54136b45bba63dd392072f5e6683f2268929eb2905f4ededaebf86ab826260c
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_860/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_860/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..3e2bd8f6a0268fe469122dd4efaaa9e8fb99f6a5
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_860/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cd760ecb45c197365f9500549e844355f75ee6cdd3aa1d9d233e627524ca479
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_860/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_860/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_860/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_860/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_860/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_860/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_860/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_860/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_860/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_860/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_860/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_860/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_880/config.json b/deepscaler-1.5b-8k/actor/global_step_880/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_880/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_880/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_880/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_880/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_880/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_880/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d90c89a4cfa2259bfe4a67d2d9094df8758dda4b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_880/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6b481d213b1d8f2b0b89dc918302aeec3e4cd397fd4f5b54eaf26df0bc83a2a
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_880/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_880/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..d7bd29f563d291f6c5ed02ab228d3f15fba8fd91
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_880/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6665e349387c4c524d55cbb55e7949c656c81b65894790966322d124e9affe1
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_880/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_880/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_880/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_880/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_880/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_880/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_880/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_880/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_880/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_880/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_880/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_880/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_900/config.json b/deepscaler-1.5b-8k/actor/global_step_900/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_900/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_900/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_900/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_900/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_900/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_900/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..44b06f578310a2446ca83adae22ad0efa79b3d3e
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_900/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:626ecb474c4599bcf427b9744f06c61b22bcc5aaf95dd515f055cc172d403d10
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_900/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_900/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..01a524d0b6d8413e07f96b10d47a44b8099d9940
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_900/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cc96c8476a7676bb3a41e3db6f5217d581de43b42521a22e13d8761db9059a9
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_900/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_900/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_900/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_900/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_900/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_900/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_900/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_900/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_900/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_900/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_900/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_900/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_920/config.json b/deepscaler-1.5b-8k/actor/global_step_920/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_920/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_920/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_920/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_920/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_920/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_920/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..918f2e28959f219fb86dbb01d63a83f3ee397e1b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_920/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ab6762a615c644501436bb58908a2d926d93a12309371a9bb2b0e35170ef75b
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_920/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_920/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..dbbd453b140dc69f236b4fe1941cfa87bf036ddf
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_920/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42d127068a6f64fabdc2e05a6be00e7559c7f584cb73e4270bd6f179f0be229d
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_920/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_920/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_920/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_920/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_920/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_920/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_920/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_920/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_920/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_920/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_920/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_920/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_940/config.json b/deepscaler-1.5b-8k/actor/global_step_940/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_940/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_940/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_940/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_940/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_940/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_940/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..b157ed70e2ce50271d99f6c765f39e0d01a09153
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_940/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3011fbaf6b8a3be01a1eafeebd44c29eb76d1f81c8aec870b105106e33dcc8f2
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_940/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_940/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..05fe85c082ea14e0fd3e693d498015a419235414
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_940/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c94cabe23aadb4d0f8b9b40aa6b800aadd0ea640711fa7e04c41e36e79e2bdc5
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_940/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_940/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_940/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_940/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_940/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_940/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_940/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_940/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_940/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_940/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_940/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_940/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
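
Two details of this tokenizer_config are easy to miss. First, the `<think>`/`</think>` and `<tool_call>`/`</tool_call>` token strings above are raw angle-bracket tags (they are prone to being eaten by HTML sanitizers when this file is rendered). Second, the chat_template splits prior assistant turns on the closing `</think>` tag, so reasoning traces are dropped from multi-turn history, and the generation prompt ends with `<|Assistant|><think>\n` to start the model inside a reasoning block. A minimal sketch, assuming a local copy of the checkpoint, of rendering a prompt through this template instead of hand-concatenating special tokens:

```python
# Minimal sketch (assumed local path): render a generation prompt with the
# chat_template defined in the tokenizer_config.json above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_940")
messages = [{"role": "user", "content": "Compute 17 * 24."}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|begin▁of▁sentence|><|User|>Compute 17 * 24.<|Assistant|><think>\n
```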
diff --git a/deepscaler-1.5b-8k/actor/global_step_960/config.json b/deepscaler-1.5b-8k/actor/global_step_960/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_960/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
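
This config pins the architecture for every checkpoint in the run: a 28-layer Qwen2 with hidden size 1536, grouped-query attention (12 query heads sharing 2 KV heads), a 151936-entry vocabulary, and weights serialized in float32, which is why a nominal "1.5B" model occupies ~7.1 GB across two shards. A minimal sketch of loading it; the local path is an assumption, and casting to bfloat16 at load time is an optional choice, not something the config requires:

```python
# Minimal sketch (assumed local path): load the checkpoint this config
# describes. On disk the weights are float32 per "torch_dtype" above;
# bfloat16 here just halves host/device memory at load time.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "deepscaler-1.5b-8k/actor/global_step_960",
    torch_dtype=torch.bfloat16,
)
print(sum(p.numel() for p in model.parameters()))  # ~1.78e9 parameters
```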
diff --git a/deepscaler-1.5b-8k/actor/global_step_960/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_960/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_960/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
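
These decoding defaults (sampling enabled, temperature 0.6, top-p 0.95) are what `model.generate()` picks up automatically from generation_config.json when no overrides are passed. A minimal sketch, with the path and prompt assumed for illustration:

```python
# Minimal sketch (assumed local path/prompt): generate() reads
# generation_config.json, so temperature=0.6 / top_p=0.95 apply by default.
from transformers import AutoModelForCausalLM, AutoTokenizer

ckpt = "deepscaler-1.5b-8k/actor/global_step_960"
tok = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForCausalLM.from_pretrained(ckpt)

inputs = tok.apply_chat_template(
    [{"role": "user", "content": "Compute 17 * 24."}],
    add_generation_prompt=True,
    return_tensors="pt",
)
out = model.generate(inputs, max_new_tokens=256)
print(tok.decode(out[0], skip_special_tokens=True))
```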
diff --git a/deepscaler-1.5b-8k/actor/global_step_960/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_960/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..caca4fea0a3079fa518bc99a8a8feaefcc95466b
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_960/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:897fd8da48d31af70515d5caab54b6d43180586fab625ba2b26922f9fd0758af
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_960/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_960/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..5d1f785ef83e562f7fe3ac987c524def09fa1cbf
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_960/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f3b6c71fa200f5ae38dd39767bcf4e824333e01229e4e15185e26ac79ebfed7
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_960/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_960/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_960/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
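
The index maps every tensor name to one of the two shards, and the shard boundary falls mid-layer: layer 21's gate/up projections stay in shard 1 while its down projection and layernorms land in shard 2. Note also that `metadata.total_size` counts tensor bytes only; the shard files above are slightly larger than their share of it because each .safetensors file carries its own JSON header. At float32 (4 bytes per parameter), total_size 7,108,352,000 implies 1,777,088,000 parameters. A minimal sanity-check sketch, assuming the shards sit next to the index locally:

```python
# Minimal sketch (assumed local files): sanity-check the shard index above.
import json
from pathlib import Path

ckpt = Path("deepscaler-1.5b-8k/actor/global_step_960")
index = json.loads((ckpt / "model.safetensors.index.json").read_text())

# Every tensor must point at one of the two shards committed above.
assert set(index["weight_map"].values()) == {
    "model-00001-of-00002.safetensors",
    "model-00002-of-00002.safetensors",
}

# total_size is tensor bytes only; float32 means 4 bytes per parameter.
print(index["metadata"]["total_size"] // 4)  # 1777088000 (~1.78B parameters)
```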
diff --git a/deepscaler-1.5b-8k/actor/global_step_960/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_960/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_960/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_960/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_960/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_960/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
diff --git a/deepscaler-1.5b-8k/actor/global_step_960/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_960/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_960/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_980/config.json b/deepscaler-1.5b-8k/actor/global_step_980/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3dc5b0f17cd42a4965eaa78e5ad2ebb4ea3c423
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_980/config.json
@@ -0,0 +1,31 @@
+{
+ "_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151646,
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 1536,
+ "initializer_range": 0.02,
+ "intermediate_size": 8960,
+ "max_position_embeddings": 131072,
+ "max_window_layers": 21,
+ "model_type": "qwen2",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 2,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000,
+ "sliding_window": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_980/generation_config.json b/deepscaler-1.5b-8k/actor/global_step_980/generation_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c02e4cea6f0d6a72fd8085c9da6d655c95689362
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_980/generation_config.json
@@ -0,0 +1,9 @@
+{
+ "_from_model_config": true,
+ "bos_token_id": 151646,
+ "do_sample": true,
+ "eos_token_id": 151643,
+ "temperature": 0.6,
+ "top_p": 0.95,
+ "transformers_version": "4.47.1"
+}
diff --git a/deepscaler-1.5b-8k/actor/global_step_980/model-00001-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_980/model-00001-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..04a29675c7e6e1a3ba5587c9c20499863a2a57e8
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_980/model-00001-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a647a07867b54aaaa028af64dbe04d1fe11350a65736f83c90d16d9d6055725f
+size 4996670464
diff --git a/deepscaler-1.5b-8k/actor/global_step_980/model-00002-of-00002.safetensors b/deepscaler-1.5b-8k/actor/global_step_980/model-00002-of-00002.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..2c8f6e0d96c4bcc7cf98ca0f800fbab6419da9cc
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_980/model-00002-of-00002.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ab903380c0a6d07a2b5495680485af694f99f7a2f8ad1f25ce83b86a640f56c
+size 2111719976
diff --git a/deepscaler-1.5b-8k/actor/global_step_980/model.safetensors.index.json b/deepscaler-1.5b-8k/actor/global_step_980/model.safetensors.index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2277217d5e56a0aebb2b5b4bb5f330ada8c3ae1a
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_980/model.safetensors.index.json
@@ -0,0 +1,346 @@
+{
+ "metadata": {
+ "total_size": 7108352000
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+}
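
The hunk above closes a sharded `model.safetensors.index.json`: its `weight_map` assigns every tensor name to the shard file holding it, and layer 21 straddles the shard boundary (attention and gate/up projections in shard 1; its norms and down projection in shard 2). As a minimal sketch, assuming this index belongs to the same `global_step_980` checkpoint as the files below, a single tensor can be resolved and loaded without reading the whole checkpoint:

```python
import json
from safetensors import safe_open

ckpt_dir = "deepscaler-1.5b-8k/actor/global_step_980"  # assumed checkpoint directory

# The index maps tensor names to shard files, exactly as in the hunk above.
with open(f"{ckpt_dir}/model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

name = "model.layers.21.mlp.down_proj.weight"
shard = weight_map[name]  # "model-00002-of-00002.safetensors" per the map above

# safe_open memory-maps the shard, so only the requested tensor is materialized.
with safe_open(f"{ckpt_dir}/{shard}", framework="pt") as shard_file:
    tensor = shard_file.get_tensor(name)
print(name, tuple(tensor.shape))
```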
diff --git a/deepscaler-1.5b-8k/actor/global_step_980/special_tokens_map.json b/deepscaler-1.5b-8k/actor/global_step_980/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..1d385d62cf08bca35254547902b792c243656ec1
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_980/special_tokens_map.json
@@ -0,0 +1,23 @@
+{
+ "bos_token": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
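
This `special_tokens_map.json` pins the sequence markers and aliases the pad token to EOS, the usual choice for a decoder-only model whose loss masks padding anyway. A hedged sketch of how these fields surface once the checkpoint directory is loaded as a tokenizer (path assumed, as above):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_980")
print(tok.bos_token)                   # <|begin▁of▁sentence|>
print(tok.eos_token)                   # <|end▁of▁sentence|>
print(tok.pad_token == tok.eos_token)  # True: pad aliases EOS per the map above
```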
diff --git a/deepscaler-1.5b-8k/actor/global_step_980/tokenizer.json b/deepscaler-1.5b-8k/actor/global_step_980/tokenizer.json
new file mode 100644
index 0000000000000000000000000000000000000000..1a2db243e47cbc113f6b2ddcc388aeeb8fe1a94c
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_980/tokenizer.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893
+size 11422778
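
The three lines above are a Git LFS pointer (spec v1), not the tokenizer itself: they record only the SHA-256 and size of the real ~11 MB `tokenizer.json`, which `git lfs` substitutes at checkout. A small sketch for verifying that the substitution actually happened, using the `oid` from the pointer:

```python
import hashlib

expected = "e20ddafc659ba90242154b55275402edeca0715e5dbb30f56815a4ce081f4893"
path = "deepscaler-1.5b-8k/actor/global_step_980/tokenizer.json"

with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

# A pointer file is only ~130 bytes; the real payload hashes to the recorded oid.
assert digest == expected, "tokenizer.json is still an LFS pointer or is corrupt"
```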
diff --git a/deepscaler-1.5b-8k/actor/global_step_980/tokenizer_config.json b/deepscaler-1.5b-8k/actor/global_step_980/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3240a4948682325cd96df18e9bf9eabbd3631a22
--- /dev/null
+++ b/deepscaler-1.5b-8k/actor/global_step_980/tokenizer_config.json
@@ -0,0 +1,195 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|end▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|User|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151645": {
+ "content": "<|Assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151646": {
+ "content": "<|begin▁of▁sentence|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|EOT|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151648": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151649": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "bos_token": "<|begin▁of▁sentence|>",
+ "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>\\n'}}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|end▁of▁sentence|>",
+ "extra_special_tokens": {},
+ "legacy": true,
+ "model_max_length": 16384,
+ "pad_token": "<|end▁of▁sentence|>",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": null,
+ "use_default_system_prompt": false
+}
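
Taken together, the config above does three things worth noting: `add_bos_token: true` prepends `<|begin▁of▁sentence|>` on plain encoding, `model_max_length` caps sequences at 16384 tokens, and the Jinja `chat_template` hoists the system message ahead of the first turn, tags user turns with `<|User|>`, closes assistant turns with `<|end▁of▁sentence|>`, and drops everything up to a closing `</think>` from earlier assistant replies so stored reasoning is not replayed into the context. A minimal rendering sketch (checkpoint path assumed as before):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("deepscaler-1.5b-8k/actor/global_step_980")

messages = [
    {"role": "system", "content": "You are a concise math assistant."},
    {"role": "user", "content": "Compute 12 * 17."},
]
# add_generation_prompt=True appends the trailing '<|Assistant|>\n' cue.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|begin▁of▁sentence|>You are a concise math assistant.<|User|>Compute 12 * 17.<|Assistant|>
```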