Update README.md
Browse files
README.md
CHANGED
@@ -63,7 +63,6 @@ def inference(datasets, model, tokenizer):
|
|
63 |
### 回答:
|
64 |
"""
|
65 |
|
66 |
-
# 修正箇所: encode_plus を使用して attention_mask を取得
|
67 |
encoded_input = tokenizer.encode_plus(
|
68 |
prompt,
|
69 |
add_special_tokens=False,
|
@@ -75,7 +74,6 @@ def inference(datasets, model, tokenizer):
|
|
75 |
tokenized_input = encoded_input["input_ids"]
|
76 |
attention_mask = encoded_input["attention_mask"]
|
77 |
|
78 |
-
# 修正箇所: attention_mask と pad_token_id を model.generate に渡す
|
79 |
with torch.no_grad():
|
80 |
outputs = model.generate(
|
81 |
tokenized_input,
|
@@ -83,7 +81,7 @@ def inference(datasets, model, tokenizer):
|
|
83 |
max_new_tokens=100,
|
84 |
do_sample=False,
|
85 |
repetition_penalty=1.2,
|
86 |
-
pad_token_id=tokenizer.pad_token_id
|
87 |
)[0]
|
88 |
|
89 |
output = tokenizer.decode(
|
@@ -102,7 +100,7 @@ def inference(datasets, model, tokenizer):
|
|
102 |
model_name = "ak0327/llm-jp-3-13b-ft-5"
|
103 |
|
104 |
model, tokenizer = load_model(model_name)
|
105 |
-
datasets = load_test_datasets()
|
106 |
results = inference(datasets, model, tokenizer)
|
107 |
```
|
108 |
|
|
|
63 |
### 回答:
|
64 |
"""
|
65 |
|
|
|
66 |
encoded_input = tokenizer.encode_plus(
|
67 |
prompt,
|
68 |
add_special_tokens=False,
|
|
|
74 |
tokenized_input = encoded_input["input_ids"]
|
75 |
attention_mask = encoded_input["attention_mask"]
|
76 |
|
|
|
77 |
with torch.no_grad():
|
78 |
outputs = model.generate(
|
79 |
tokenized_input,
|
|
|
81 |
max_new_tokens=100,
|
82 |
do_sample=False,
|
83 |
repetition_penalty=1.2,
|
84 |
+
pad_token_id=tokenizer.pad_token_id
|
85 |
)[0]
|
86 |
|
87 |
output = tokenizer.decode(
|
|
|
100 |
model_name = "ak0327/llm-jp-3-13b-ft-5"
|
101 |
|
102 |
model, tokenizer = load_model(model_name)
|
103 |
+
datasets = load_test_datasets() # your datasets
|
104 |
results = inference(datasets, model, tokenizer)
|
105 |
```
|
106 |
|