Hello, may I ask what dataset you are using? Is it open source or self-made?
#1, opened by xttttttttt
- README.md +13 -43
- config.json +3 -4
- generation_config.json +1 -1
- model.safetensors +0 -3
- special_tokens_map.json +3 -21
- tokenizer_config.json +2 -3
README.md
CHANGED
@@ -1,6 +1,3 @@
- ---
- {}
- ---
  Small dummy LLama2-type Model useable for Unit/Integration tests. Suitable for CPU only machines, see [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio/blob/main/tests/integration/test_integration.py) for an example integration test.
  
  Model was created as follows:
@@ -11,7 +8,7 @@ repo_name = "MaxJeblick/llama2-0b-unit-test"
  model_name = "h2oai/h2ogpt-4096-llama2-7b-chat"
  config = AutoConfig.from_pretrained(model_name)
  config.hidden_size = 12
- config.max_position_embeddings =
+ config.max_position_embeddings = 32
  config.intermediate_size = 24
  config.num_attention_heads = 2
  config.num_hidden_layers = 2
@@ -27,44 +24,17 @@ tokenizer.push_to_hub(repo_name, private=False)
  config.push_to_hub(repo_name, private=False)
  ```
  
- Below is a small example that will run in ~ 1 second.
  
- 
- import torch
- from transformers import AutoModelForCausalLM
- 
- 
- def test_manual_greedy_generate():
-     max_new_tokens = 10
- 
-     # note this is on CPU!
-     model = AutoModelForCausalLM.from_pretrained("MaxJeblick/llama2-0b-unit-test").eval()
-     input_ids = model.dummy_inputs["input_ids"]
- 
-     y = model.generate(input_ids, max_new_tokens=max_new_tokens)
- 
-     assert y.shape == (3, input_ids.shape[1] + max_new_tokens)
- 
-     for _ in range(max_new_tokens):
-         with torch.no_grad():
-             outputs = model(input_ids)
- 
-         next_token_logits = outputs.logits[:, -1, :]
-         next_token_id = torch.argmax(next_token_logits, dim=-1).unsqueeze(-1)
  
- 
- 
- 
- 
- 
- 
- 
- 
- 
- 
- 
- from transformers import AutoModelForCausalLM
- @pytest.fixture(scope="session")
- def model():
-     return AutoModelForCausalLM.from_pretrained("MaxJeblick/llama2-0b-unit-test").eval()
- ```
+ Use the following configuration in [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio) to run a complete experiment in **5 seconds** using the default dataset and default settings otherwise:
+ 
+ ```yaml
+ Validation Size: 0.1
+ Data Sample: 0.1
+ Max Length Prompt: 32
+ Max Length Answer: 32
+ Max Length: 64
+ Backbone Dtype: float16
+ Gradient Checkpointing: False
+ Batch Size: 8
+ Max Length Inference: 16
+ ```
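The unit-test example removed from the README above is cut off in this view. A self-contained sketch of the same manual greedy-decoding check, assuming the `MaxJeblick/llama2-0b-unit-test` repo id, with the loop completion and the final comparison filled in as a plausible reconstruction (they are not visible in the diff), could look like:

```python
import pytest
import torch
from transformers import AutoModelForCausalLM


@pytest.fixture(scope="session")
def model():
    # tiny model, loads and runs in well under a second on CPU
    return AutoModelForCausalLM.from_pretrained("MaxJeblick/llama2-0b-unit-test").eval()


def test_manual_greedy_generate(model):
    max_new_tokens = 10
    input_ids = model.dummy_inputs["input_ids"]  # batch of 3 short dummy sequences

    # reference output from generate() with default (greedy) decoding
    y = model.generate(input_ids, max_new_tokens=max_new_tokens)
    assert y.shape == (3, input_ids.shape[1] + max_new_tokens)

    # re-implement greedy decoding step by step and compare against generate()
    for _ in range(max_new_tokens):
        with torch.no_grad():
            outputs = model(input_ids)
        next_token_logits = outputs.logits[:, -1, :]
        next_token_id = torch.argmax(next_token_logits, dim=-1).unsqueeze(-1)
        input_ids = torch.cat([input_ids, next_token_id], dim=-1)

    assert torch.equal(y, input_ids)
```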
config.json
CHANGED
@@ -4,14 +4,13 @@
      "LlamaForCausalLM"
    ],
    "attention_bias": false,
-   "attention_dropout": 0.0,
    "bos_token_id": 1,
    "eos_token_id": 2,
    "hidden_act": "silu",
    "hidden_size": 12,
    "initializer_range": 0.02,
    "intermediate_size": 24,
-   "max_position_embeddings":
+   "max_position_embeddings": 32,
    "model_type": "llama",
    "num_attention_heads": 2,
    "num_hidden_layers": 2,
@@ -21,8 +20,8 @@
    "rope_scaling": null,
    "rope_theta": 10000.0,
    "tie_word_embeddings": false,
-   "torch_dtype": "
-   "transformers_version": "4.
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0",
    "use_cache": true,
    "vocab_size": 32000
  }
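For orientation, the shrunken config corresponds to roughly 0.77M parameters, dominated by the two 32000 × 12 embedding/output matrices (word embeddings are untied), i.e. about 3 MB in float32. A quick sanity check of that count, sketched here by instantiating the model from the config rather than downloading weights, could be:

```python
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("MaxJeblick/llama2-0b-unit-test")
model = AutoModelForCausalLM.from_config(config)

n_params = sum(p.numel() for p in model.parameters())
# ~770k parameters: 2 x (32000 x 12) for embeddings and lm_head,
# plus two tiny transformer layers
print(f"{n_params:,} parameters, ~{4 * n_params / 1e6:.1f} MB in float32")
```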
generation_config.json
CHANGED
@@ -2,5 +2,5 @@
    "_from_model_config": true,
    "bos_token_id": 1,
    "eos_token_id": 2,
-   "transformers_version": "4.
+   "transformers_version": "4.34.0"
  }
model.safetensors
DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5108f9b61c4c32b2ae72fd11c85535054ea4ffef80fa0fb8a2cd7c5d0e7de717
- size 3085952
special_tokens_map.json
CHANGED
@@ -1,23 +1,5 @@
  {
-   "bos_token": {
-     "content": "<s>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "eos_token": {
-     "content": "</s>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "unk_token": {
-     "content": "<unk>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   }
+   "bos_token": "<s>",
+   "eos_token": "</s>",
+   "unk_token": "<unk>"
  }
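The earlier dict form (with `lstrip`/`rstrip`/`normalized` flags) and the simplified string form deserialize to the same special tokens. A minimal check, assuming the published repo id, might be:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("MaxJeblick/llama2-0b-unit-test")
# both serializations of special_tokens_map.json resolve to the same tokens
assert tok.bos_token == "<s>"
assert tok.eos_token == "</s>"
assert tok.unk_token == "<unk>"
```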
tokenizer_config.json
CHANGED
@@ -1,6 +1,4 @@
  {
-   "add_bos_token": true,
-   "add_eos_token": false,
    "added_tokens_decoder": {
      "0": {
        "content": "<unk>",
@@ -27,6 +25,7 @@
        "special": true
      }
    },
+   "additional_special_tokens": [],
    "bos_token": "<s>",
    "clean_up_tokenization_spaces": false,
    "eos_token": "</s>",
@@ -37,5 +36,5 @@
    "sp_model_kwargs": {},
    "tokenizer_class": "LlamaTokenizer",
    "unk_token": "<unk>",
-   "use_default_system_prompt":
+   "use_default_system_prompt": true
  }
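Dropping the explicit `add_bos_token` / `add_eos_token` entries does not change tokenization, since `LlamaTokenizer` defaults to adding BOS and not EOS. A short check, again assuming the published repo id, could be:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("MaxJeblick/llama2-0b-unit-test")
ids = tok("hello world").input_ids
# BOS (id 1) is prepended by default; EOS is not appended by default
assert ids[0] == tok.bos_token_id == 1
assert ids[-1] != tok.eos_token_id
```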