Update app.py
Browse files
app.py
CHANGED
@@ -7,13 +7,13 @@ import sentencepiece
|
|
7 |
|
8 |
model_id = "01-ai/Yi-34B-200K"
|
9 |
|
10 |
- os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:54'
|
11 |
- device = "cuda" if torch.cuda.is_available() else "cpu"
|
12 |
|
13 |
tokenizer = YiTokenizer(vocab_file="./tokenizer.model")
|
14 |
- model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
|
15 |
- model =
|
16 |
- model = model.to(device)
|
17 |
|
18 |
def run(message, chat_history, max_new_tokens=4056, temperature=3.5, top_p=0.9, top_k=800):
|
19 |
prompt = get_prompt(message, chat_history)
|
|
|
7 |
|
8 |
model_id = "01-ai/Yi-34B-200K"
|
9 |
|
10 |
+ # os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:54'
|
11 |
+ # device = "cuda" if torch.cuda.is_available() else "cpu"
|
12 |
|
13 |
tokenizer = YiTokenizer(vocab_file="./tokenizer.model")
|
14 |
+ model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_8bit=True, trust_remote_code=True)
|
15 |
+ # model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
|
16 |
+ # model = model.to(device)
|
17 |
|
18 |
def run(message, chat_history, max_new_tokens=4056, temperature=3.5, top_p=0.9, top_k=800):
|
19 |
prompt = get_prompt(message, chat_history)
|