mrfakename and redmoe-ai-v1 committed
Commit 9d5c891 · verified · 0 parent(s)

Duplicate from rednote-hilab/dots.llm1.base


Co-authored-by: redmoe-ai-v1 <[email protected]>

This view is limited to 50 files because it contains too many changes. See the raw diff for the full list.
Files changed (50)
  1. .gitattributes +38 -0
  2. LICENSE +21 -0
  3. README.md +192 -0
  4. config.json +38 -0
  5. figures/XHSlong750px.png +0 -0
  6. figures/new_logo.png +3 -0
  7. figures/new_logo2.png +0 -0
  8. figures/performance.png +3 -0
  9. figures/wechat.png +3 -0
  10. generation_config.json +6 -0
  11. merges.txt +0 -0
  12. model-00001-of-00067.safetensors +3 -0
  13. model-00002-of-00067.safetensors +3 -0
  14. model-00003-of-00067.safetensors +3 -0
  15. model-00004-of-00067.safetensors +3 -0
  16. model-00005-of-00067.safetensors +3 -0
  17. model-00006-of-00067.safetensors +3 -0
  18. model-00007-of-00067.safetensors +3 -0
  19. model-00008-of-00067.safetensors +3 -0
  20. model-00009-of-00067.safetensors +3 -0
  21. model-00010-of-00067.safetensors +3 -0
  22. model-00011-of-00067.safetensors +3 -0
  23. model-00012-of-00067.safetensors +3 -0
  24. model-00013-of-00067.safetensors +3 -0
  25. model-00014-of-00067.safetensors +3 -0
  26. model-00015-of-00067.safetensors +3 -0
  27. model-00016-of-00067.safetensors +3 -0
  28. model-00017-of-00067.safetensors +3 -0
  29. model-00018-of-00067.safetensors +3 -0
  30. model-00019-of-00067.safetensors +3 -0
  31. model-00020-of-00067.safetensors +3 -0
  32. model-00021-of-00067.safetensors +3 -0
  33. model-00022-of-00067.safetensors +3 -0
  34. model-00023-of-00067.safetensors +3 -0
  35. model-00024-of-00067.safetensors +3 -0
  36. model-00025-of-00067.safetensors +3 -0
  37. model-00026-of-00067.safetensors +3 -0
  38. model-00027-of-00067.safetensors +3 -0
  39. model-00028-of-00067.safetensors +3 -0
  40. model-00029-of-00067.safetensors +3 -0
  41. model-00030-of-00067.safetensors +3 -0
  42. model-00031-of-00067.safetensors +3 -0
  43. model-00032-of-00067.safetensors +3 -0
  44. model-00033-of-00067.safetensors +3 -0
  45. model-00034-of-00067.safetensors +3 -0
  46. model-00035-of-00067.safetensors +3 -0
  47. model-00036-of-00067.safetensors +3 -0
  48. model-00037-of-00067.safetensors +3 -0
  49. model-00038-of-00067.safetensors +3 -0
  50. model-00039-of-00067.safetensors +3 -0
.gitattributes ADDED
@@ -0,0 +1,38 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
figures/performance.png filter=lfs diff=lfs merge=lfs -text
figures/new_logo.png filter=lfs diff=lfs merge=lfs -text
figures/wechat.png filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 rednote-hilab

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md ADDED
@@ -0,0 +1,192 @@
---
license: mit
license_link: https://huggingface.co/rednote-hilab/dots.llm1.base/blob/main/LICENSE
library_name: transformers
language:
- en
- zh
---

# dots1

<p align="center">
<img src="figures/new_logo2.png" width="300"/>
</p>

<p align="center">
&nbsp;&nbsp;🤗 <a href="https://huggingface.co/rednote-hilab">Hugging Face</a>&nbsp;&nbsp; | &nbsp;&nbsp;📑 <a href="https://github.com/rednote-hilab/dots.llm1/blob/main/dots1_tech_report.pdf">Paper</a>&nbsp;&nbsp;
<br>
🖥️ <a href="https://huggingface.co/spaces/rednote-hilab/dots-demo">Demo</a>&nbsp;&nbsp; | &nbsp;&nbsp;💬 <a href="figures/wechat.png">WeChat (微信)</a>&nbsp;&nbsp; | &nbsp;&nbsp;📕 <a href="https://www.xiaohongshu.com/user/profile/683ffe42000000001d021a4c">rednote</a>&nbsp;&nbsp;
</p>

Visit our Hugging Face organization (links above), search for checkpoints whose names start with `dots.llm1`, or browse the [dots1 collection](https://huggingface.co/collections/rednote-hilab/dotsllm1-68246aaaaba3363374a8aa7c), and you will find everything you need. Enjoy!

## News

- 2025.06.06: We released the `dots.llm1` series. Check our [report](https://github.com/rednote-hilab/dots.llm1/blob/main/dots1_tech_report.pdf) for more details!

## 1. Introduction

The `dots.llm1` model is a large-scale Mixture-of-Experts (MoE) model that activates 14B of its 142B total parameters, delivering performance on par with state-of-the-art models.
Leveraging our meticulously crafted and efficient data processing pipeline, `dots.llm1` achieves performance comparable to Qwen2.5-72B after being pretrained on 11.2T high-quality tokens without synthetic data. To foster further research, we open-source intermediate training checkpoints every one trillion tokens, providing valuable insights into the learning dynamics of large language models.

<p align="center">
<img width="90%" src="./figures/performance.png">
</p>

## 2. Model Summary

**This repo contains the base and instruction-tuned `dots.llm1` models**, which have the following features (a quick config check is sketched after the list):

- Type: A MoE model with 14B activated and 142B total parameters, trained on 11.2T tokens.
- Training Stages: Pretraining and SFT.
- Architecture: Multi-head attention with QK-Norm in the attention layer; fine-grained MoE utilizing the top 6 of 128 routed experts, plus 2 shared experts.
- Number of Layers: 62
- Number of Attention Heads: 32
- Supported Languages: English, Chinese
- Context Length: 32,768 tokens
- License: MIT
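To confirm these numbers against what actually ships in the repo, the short sketch below fetches this repo's `config.json` with the `huggingface_hub` package (assumed to be installed via `pip install huggingface_hub`) and prints the corresponding fields. This is an illustrative check, not part of the official tooling.

```python
import json

from huggingface_hub import hf_hub_download  # assumes `pip install huggingface_hub`

# Download only the small config.json from this repo (no model weights).
cfg_path = hf_hub_download(repo_id="rednote-hilab/dots.llm1.base", filename="config.json")
with open(cfg_path) as f:
    cfg = json.load(f)

print("hidden layers:     ", cfg["num_hidden_layers"])        # 62
print("attention heads:   ", cfg["num_attention_heads"])      # 32
print("routed experts:    ", cfg["n_routed_experts"])         # 128
print("experts per token: ", cfg["num_experts_per_tok"])      # 6 (top-6 routing)
print("shared experts:    ", cfg["n_shared_experts"])         # 2
print("context length:    ", cfg["max_position_embeddings"])  # 32,768
```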
The highlights of `dots.llm1` include:

- **Enhanced Data Processing**: We propose a scalable and fine-grained *three-stage* data processing framework designed to generate large-scale, high-quality, and diverse data for pretraining.
- **No Synthetic Data during Pretraining**: *11.2 trillion* high-quality, non-synthetic tokens were used in base-model pretraining.
- **Performance and Cost Efficiency**: `dots.llm1` is an open-source model that activates only *14B* parameters at inference, delivering both comprehensive capabilities and high computational efficiency.
- **Infrastructure**: We introduce an innovative MoE all-to-all communication and computation overlapping recipe based on interleaved 1F1B pipeline scheduling and an efficient grouped GEMM implementation to boost computational efficiency.
- **Open Accessibility to Model Dynamics**: Intermediate model checkpoints are released for *every 1T tokens* trained, facilitating future research into the learning dynamics of large language models.

## 3. Example Usage

### Model Downloads

<div align="center">

| **Model** | **#Total Params** | **#Activated Params** | **Context Length** | **Download Link** |
| :------------: | :------------: | :------------: | :------------: | :------------: |
| dots.llm1.base | 142B | 14B | 32K | [🤗 Hugging Face](https://huggingface.co/rednote-hilab/dots.llm1.base) |
| dots.llm1.inst | 142B | 14B | 32K | [🤗 Hugging Face](https://huggingface.co/rednote-hilab/dots.llm1.inst) |

</div>
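Besides the links above, a checkpoint can be fetched programmatically. The sketch below uses `huggingface_hub.snapshot_download` (assumed to be installed) with a hypothetical `local_dir`; note that the base checkpoint ships as 67 safetensors shards of roughly 4.3 GB each, so close to 290 GB of free disk space is needed.

```python
from huggingface_hub import snapshot_download  # assumes `pip install huggingface_hub`

# Download every file in the repo (config, tokenizer, and all 67 weight shards).
local_path = snapshot_download(
    repo_id="rednote-hilab/dots.llm1.base",
    local_dir="./dots.llm1.base",  # hypothetical target directory; adjust as needed
)
print("Checkpoint downloaded to", local_path)
```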
### Docker (recommended)

The Docker images are available on [Docker Hub](https://hub.docker.com/repository/docker/rednotehilab/dots1/tags), based on the official images.

You can start a server via vLLM:

```shell
docker run --gpus all \
    -v ~/.cache/huggingface:/root/.cache/huggingface \
    -p 8000:8000 \
    --ipc=host \
    rednotehilab/dots1:vllm-openai-v0.9.0.1 \
    --model rednote-hilab/dots.llm1.inst \
    --tensor-parallel-size 8 \
    --trust-remote-code \
    --served-model-name dots1
```

You can then verify that the model is running with a request like the following:

```shell
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "dots1",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Who won the world series in 2020?"}
        ],
        "max_tokens": 32,
        "temperature": 0
    }'
```
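If you prefer Python over curl, the same endpoint can be queried with the `openai` client package (an assumption here; any OpenAI-compatible client works), since the server exposes an OpenAI-compatible API:

```python
from openai import OpenAI  # assumes `pip install openai`

# The vLLM server started above exposes an OpenAI-compatible API; the key is ignored.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="dots1",  # must match the --served-model-name used when starting the server
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Who won the world series in 2020?"},
    ],
    max_tokens=32,
    temperature=0,
)
print(response.choices[0].message.content)
```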

### Inference with Hugging Face Transformers

We are working to merge support into Transformers ([PR #38143](https://github.com/huggingface/transformers/pull/38143)).

#### Text Completion

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "rednote-hilab/dots.llm1.base"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Load in bfloat16 and shard across the available GPUs.
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.bfloat16)

text = "An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors. The output is"
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs.to(model.device), max_new_tokens=100)
result = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(result)
```

#### Chat Completion

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "rednote-hilab/dots.llm1.inst"
tokenizer = AutoTokenizer.from_pretrained(model_name)

model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.bfloat16)

messages = [
    {"role": "user", "content": "Write a piece of quicksort code in C++"}
]
# Apply the chat template, then decode only the newly generated assistant reply.
input_tensor = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
outputs = model.generate(input_tensor.to(model.device), max_new_tokens=200)

result = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)
print(result)
```

### Inference with vLLM

[vLLM](https://github.com/vllm-project/vllm) is a high-throughput and memory-efficient inference and serving engine for LLMs. Official support for this model is tracked in [PR #18254](https://github.com/vllm-project/vllm/pull/18254).

```shell
vllm serve dots.llm1.inst --port 8000 --tensor-parallel-size 8
```

An OpenAI-compatible API will be available at `http://localhost:8000/v1`; the same curl and Python client requests shown above work against it.

### Inference with SGLang

[SGLang](https://github.com/sgl-project/sglang) is a fast serving framework for large language models and vision-language models. It can be used to launch a server with an OpenAI-compatible API. Official support for this model is tracked in [PR #6471](https://github.com/sgl-project/sglang/pull/6471).

Getting started is as simple as running:

```shell
python -m sglang.launch_server --model-path dots.llm1.inst --tp 8 --host 0.0.0.0 --port 8000
```

An OpenAI-compatible API will be available at `http://localhost:8000/v1`.

## 4. Evaluation Results

Detailed evaluation results are reported in the [📑 technical report](https://github.com/rednote-hilab/dots.llm1/blob/main/dots1_tech_report.pdf).

## Citation

If you find `dots.llm1` useful or want to use it in your projects, please cite our paper:

```bibtex
@article{dots1,
  title={dots.llm1 Technical Report},
  author={rednote-hilab},
  journal={arXiv preprint arXiv:TBD},
  year={2025}
}
```
config.json ADDED
@@ -0,0 +1,38 @@
{
  "architectures": [
    "Dots1ForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": null,
  "eos_token_id": 151643,
  "first_k_dense_replace": 1,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 10944,
  "max_position_embeddings": 32768,
  "model_type": "dots1",
  "moe_intermediate_size": 1408,
  "moe_layer_freq": 1,
  "n_routed_experts": 128,
  "n_shared_experts": 2,
  "norm_topk_prob": true,
  "num_attention_heads": 32,
  "num_experts_per_tok": 6,
  "num_hidden_layers": 62,
  "num_key_value_heads": 32,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000000,
  "routed_scaling_factor": 2.5,
  "sliding_window": null,
  "scoring_func": "noaux_tc",
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.46.3",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 152064
}
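The MoE shape above makes the parameter counts quoted in the README easy to sanity-check. The snippet below is a back-of-envelope estimate computed only from the config fields listed above; it deliberately ignores norm weights, QK-Norm parameters, router matrices, and biases, so treat it as an approximation rather than an official count.

```python
# Rough parameter estimate from the config.json values above (approximation only:
# norm weights, QK-Norm parameters, router matrices, and biases are ignored).

hidden       = 4096      # hidden_size
layers       = 62        # num_hidden_layers
dense_layers = 1         # first_k_dense_replace: the first layer uses a dense MLP
moe_layers   = layers - dense_layers
vocab        = 152064    # vocab_size
dense_ffn    = 10944     # intermediate_size (dense MLP)
expert_ffn   = 1408      # moe_intermediate_size
routed       = 128       # n_routed_experts
shared       = 2         # n_shared_experts
top_k        = 6         # num_experts_per_tok

embeddings = 2 * vocab * hidden                     # untied input and output embeddings
attention  = layers * 4 * hidden * hidden           # q, k, v, o projections (MHA, 32 heads)
dense_mlp  = dense_layers * 3 * hidden * dense_ffn  # gate, up, down of the dense layer
per_expert = 3 * hidden * expert_ffn                # gate, up, down of one expert

total     = embeddings + attention + dense_mlp + moe_layers * (routed + shared) * per_expert
activated = embeddings + attention + dense_mlp + moe_layers * (top_k + shared) * per_expert

print(f"total     ~ {total / 1e9:.0f}B parameters")      # ~143B (README quotes 142B)
print(f"activated ~ {activated / 1e9:.0f}B parameters")  # ~14B
```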
figures/XHSlong750px.png ADDED
figures/new_logo.png ADDED

Git LFS Details

  • SHA256: 2e5808698bcd60df90869af469743248a4560d0ffb2232eceb74cd9c0a7df763
  • Pointer size: 131 Bytes
  • Size of remote file: 101 kB
figures/new_logo2.png ADDED
figures/performance.png ADDED

Git LFS Details

  • SHA256: ca42a057f65c1ea12c303e41938dbe38fc285769002272af767b76605cf8ea98
  • Pointer size: 131 Bytes
  • Size of remote file: 139 kB
figures/wechat.png ADDED

Git LFS Details

  • SHA256: e6f386b64bd313bd998bf0f25e9f1b32c0fbbfe7d972a60227c22fdc044da885
  • Pointer size: 131 Bytes
  • Size of remote file: 118 kB
generation_config.json ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 151643,
  "eos_token_id": 151643,
  "transformers_version": "4.46.3"
}
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9d49fdd6709c3d0ce0dea361861380df8931b844c47380a686cc2bbe2f6c6a49
size 4290290528
model-00002-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c4406c15b74a1bfda6b810945834e456313bb651b4e80928ebaff259b0da16d7
size 4287689952
model-00003-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b5681d9f1d4f4e7712f1390e5781ecf5c715cdfe083df667fa70f4bb6a6da0ad
size 4287690208
model-00004-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dc6e994dc951aa15c0473c92660c5caa25af4a02e8a3ff93c06fcaa2b61039dc
size 4287690208
model-00005-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b41977295314b2a0d022068cbda68f0e89d5e5bf5d70e79f8830ac938515b9d4
size 4287690216
model-00006-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6bf8c31cff99600fce52e7500b892a8851cbbb41ca16c4adcc318eef9d47edf6
size 4290819520
model-00007-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b98f276b4eb94bb6e6beef33176f5dcdb07b3d1706f1af75a493d0485b83eb4b
size 4287690248
model-00008-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1e7aa2e2b2c443edff36587803b20bb3dfec08c1c1d3a1b98c81e3817a0defe4
size 4287690248
model-00009-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bdfcf2c2d5e7f664b51defb6160f4877bca129ee1b8401418283f9a33a12964c
size 4287690240
model-00010-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:67f2c1d0f29a708c024341550ae32ccc1a9e9a1171cf1322aefdbcbb90f0e350
size 4287690232
model-00011-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bc1adf542d3853f5c0ec52c4c01bff2d00138f3b6081de0b7b868b5f7cc07c41
size 4287690232
model-00012-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ca9d244f137a1532b1e16751126a5fa09c87ba99499d34a7d355d7f63a44ffd3
size 4287690232
model-00013-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:11242ffebd90a3445d399e0328eeddf34cfa3bdcdfffc3ddaa307aacf91afd3c
size 4287690072
model-00014-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:520acecbd3265af0cdf3644121ca056b20df606a3ab7fc8121a60fecbdf9ed99
size 4287690000
model-00015-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:abe2d1f90b0989498b0d646762364cc8d86176674aae06679b2145ac65c7c86e
size 4287690232
model-00016-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ea87941b1e85a5d8137a3c6fce4b756a1ffe502440c166fec9f510c72dbb56f0
size 4287690208
model-00017-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:52540eba653a20f9d491b2aa30425fece044ab153720de7009e53b8fee59d28b
size 4287690200
model-00018-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b0a9959428b6d8756204d23a5bf319eac9b963148a2ff45e8bca9f774a2ce839
size 4287690208
model-00019-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dfc5ed9f7ba6cfbe997deb9cd5566509b937ae10ff12349a3b378a641984d3fb
size 4280334120
model-00020-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:26d50d67c9195dc5db9ee4619ea4d4e4ccc18b859e8f44aff47a91dab6ad5a5a
size 4286641184
model-00021-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94246a1b2bc662d84622e79121c1308e7a8a7518ad118e1448f057543b7fe72c
size 4287690248
model-00022-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a9f1b05307d49d8b4dcd47fdc21414fdbd9b7ca03c2e7d147b98b11538d01227
size 4287690248
model-00023-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d2b8903626d00cc89609571a57700db263b178f6af3745804f0acc79854867d2
size 4287690232
model-00024-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:89384643591a0ba8a39aa01ecb5f542bd981e3aa886c2e0b2577ea5b224a2558
size 4287690232
model-00025-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:73040aeda4fce0a83d772149314d79b28b97358559a6cef1542b2f7088fd175e
size 4287690032
model-00026-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:102a06dadca9106fb4a21150154b019512ff65f5173e3cbcc9b75a30510e7619
size 4287690040
model-00027-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:74a0323d50e7d98b4ba79ed45503b9fbc6a3cc359046a8593a4aecb3e69ee512
size 4287690232
model-00028-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f9f11bc0fdacc010614211f494b95b980ca43fb2475894e066bb0176b9f42db6
size 4287690232
model-00029-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:812d937410397a7f5a651c81397521ef67e2223a3d01e39a31dba59df14c0924
size 4287690216
model-00030-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:63ae456aec8e210e6430e18e3bd4f986e6eb9f0f65a9708cd5f3112cd19fdce6
size 4287690208
model-00031-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fce896c941352359c60fd765316d6d6dd0a283d13b2c4abd501fbd01a95b283c
size 4287690208
model-00032-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:91a9a9a0465f602e33a98dbb211ad396a13449c9faf55879b6a783aaa7a40421
size 4289779584
model-00033-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ce843941e1fd7591ca10e2d5725dfc0cd575ed38c238b2f5d6be6e761b24b2c5
size 4288730168
model-00034-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ca34bf5695346747c6795c03b1d01d8f7158b291b0c3555bb87c15c180b211f3
size 4287690248
model-00035-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:57e51388ffc129840e1d3cd21b2e1a13fe9b93da77ecb756d80a9d39114d1a17
size 4287690248
model-00036-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:83d99c7a1d9daea01217c5432aff47ada42fad3c9d7183ec7d8f76525b81f955
size 4287690240
model-00037-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4d8fd47f7486afcc47293209e17278b4371a6c819b510e11401a633c19232e79
size 4287689984
model-00038-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:28ccc69f954f3305993644738349cf64991770f2e17cdcb3aa71acf87e57d827
size 4287690088
model-00039-of-00067.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cd3585b10b1ac4a49f8f1fd778a98234e5b6870c4021dc49af99b3362e9ef84a
size 4287690232