Kohsaku committed
Commit 567151a · verified · 1 Parent(s): 8448b67
Files changed (1):
  1. README.md +45 -2
README.md CHANGED
@@ -22,11 +22,14 @@ This gemma2 model was trained 2x faster with [Unsloth](https://github.com/unslot
 
 [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
 
- Sample Use
+ Inference code
+
+ Note that the HF_TOKEN environment variable is assumed to be set separately.
+
 ```python
 # Colaboratory example
 !pip uninstall unsloth -y
- !pip install -q --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/niryuu/unsloth.git@use-exact-model-name"
+ !pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
 !pip install --upgrade torch
 !pip install --upgrade xformers
 !pip install ipywidgets --upgrade
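The added note above assumes HF_TOKEN is already set. A minimal sketch of one way to do that in Colaboratory, assuming the token is stored as a notebook secret named HF_TOKEN (the secret name and the huggingface_hub login step are illustrative, not part of this commit):

```python
import os

# Illustrative only: pull the token from a Colab notebook secret and
# export it as HF_TOKEN. The secret name "HF_TOKEN" is an assumption.
from google.colab import userdata
os.environ["HF_TOKEN"] = userdata.get("HF_TOKEN")

# Optionally authenticate huggingface_hub with the same token.
from huggingface_hub import login
login(token=os.environ["HF_TOKEN"])
```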
@@ -40,6 +43,7 @@ if torch.cuda.get_device_capability()[0] >= 8:
 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 from unsloth import FastLanguageModel
 import torch
+ import json
 
 model_name = "Kohsaku/gemma-2-9b-finetune-4"
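The diff elides the actual loading code between this hunk and the next. A minimal sketch of how such a model is typically loaded with Unsloth's FastLanguageModel.from_pretrained, using the names from the hunk above; the max_seq_length, dtype, and load_in_4bit values are assumptions, not taken from this README:

```python
# Hypothetical reconstruction of the elided loading step; parameter
# values are illustrative only. FastLanguageModel and model_name come
# from the imports and assignment in the hunk above.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = model_name,   # "Kohsaku/gemma-2-9b-finetune-4"
    max_seq_length = 2048,     # assumed context length
    dtype = None,              # let Unsloth choose a dtype for the GPU
    load_in_4bit = True,       # assumed 4-bit loading for Colab memory
)
```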
 
@@ -69,4 +73,43 @@ with torch.no_grad():
         repetition_penalty=1.2
     )[0]
 print(tokenizer.decode(output))
+
+ # Evaluation with ELYZA-tasks-100-TV
+ # Load ELYZA-tasks-100-TV. Upload the file beforehand.
+ # Load the dataset.
+ # In the omnicampus development environment, drag and drop the task jsonl into the left pane, then run.
+ datasets = []
+ with open("elyza-tasks-100-TV_0.jsonl", "r") as f:
+     item = ""
+     for line in f:
+         line = line.strip()
+         item += line
+         if item.endswith("}"):
+             datasets.append(json.loads(item))
+             item = ""
+
+ # Run the tasks with the trained model
+ from tqdm import tqdm
+
+ # Switch the model into inference mode
+ FastLanguageModel.for_inference(model)
+
+ results = []
+ for dt in tqdm(datasets):
+     input = dt["input"]
+
+     prompt = f"""### 指示\n{input}\n### 回答\n"""
+
+     inputs = tokenizer([prompt], return_tensors = "pt").to(model.device)
+
+     outputs = model.generate(**inputs, max_new_tokens = 512, use_cache = True, do_sample=False, repetition_penalty=1.2)
+     prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]
+
+     results.append({"task_id": dt["task_id"], "input": input, "output": prediction})
+
+ # Save as jsonl
+ with open(f"{model_name.split('/')[-1]}_outputs.jsonl", 'w', encoding='utf-8') as f:
+     for result in results:
+         json.dump(result, f, ensure_ascii=False)
+         f.write('\n')
 ```
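The loader in the added block accumulates physical lines until one ends with a closing brace, so records that were split across several lines still parse. If every record sits on its own line, as is conventional for JSONL, a simpler reader suffices; this sketch assumes the same file name and record keys as the commit above:

```python
import json

# Minimal sketch: one JSON object per physical line, as in standard JSONL.
# The file name and the task_id/input keys follow the README above.
datasets = []
with open("elyza-tasks-100-TV_0.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        line = line.strip()
        if line:  # skip blank lines
            datasets.append(json.loads(line))
```

The brace-accumulating version in the commit is a deliberate choice: it tolerates records pretty-printed across multiple lines, which the strict one-line reader above would reject.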