Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,15 +1,12 @@
|
|
1 |
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Hugging Face model ID: rinna bilingual GPT-NeoX 4B, instruction-tuned with PPO.
MODEL_ID = "rinna/bilingual-gpt-neox-4b-instruction-ppo"

# 8-bit quantization config to reduce the memory footprint of the 4B model.
# FIX: BitsAndBytesConfig was used here without being imported (NameError);
# it is now included in the transformers import above.
quantization_config = BitsAndBytesConfig(load_in_8bit=True)

# FIX: the quantization config was built but (as far as the visible code shows)
# never passed to from_pretrained, making it a no-op — it is now applied.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    quantization_config=quantization_config,  # apply 8-bit loading
    device_map="auto",  # let the library place weights on available CPU/GPU
)

# use_fast=False selects the slow (Python/SentencePiece) tokenizer implementation.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)
|
|
|
1 |
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face model ID: rinna bilingual GPT-NeoX 4B, instruction-tuned with PPO.
MODEL_ID = "rinna/bilingual-gpt-neox-4b-instruction-ppo"

# Load the model WITHOUT 8-bit quantization; device_map="auto" lets the
# library choose CPU/GPU placement automatically.
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, device_map="auto")

# use_fast=False selects the slow (Python/SentencePiece) tokenizer implementation.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)
|