fix model args
README.md CHANGED
@@ -35,7 +35,7 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained("your_path_to_freewilly", use_fast=False)
-model = AutoModelForCausalLM.from_pretrained("your_path_to_freewilly", torch_dtype=torch.float16, low_cpu_mem_usage=True,
+model = AutoModelForCausalLM.from_pretrained("your_path_to_freewilly", torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto")
 
 system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
 system_prompt += "### Instruction:\nYou are Free Willy, an AI that follows instructions extremely well. Help as much as you can. Remember, be safe, and don't do anything illegal.\n\n"
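
For context, here is a minimal sketch of how the corrected `from_pretrained` call fits into the README's usage snippet. Note that `device_map="auto"` requires the `accelerate` package and lets Transformers place the fp16 weights across available devices automatically. The `### Input:`/`### Response:` prompt continuation, the example message, and the generation parameters below are assumptions for illustration and are not part of this diff.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your_path_to_freewilly", use_fast=False)

# The fixed call: the previous README left this line unterminated and without
# device_map; "auto" distributes the fp16 weights across the available GPUs/CPU.
model = AutoModelForCausalLM.from_pretrained(
    "your_path_to_freewilly",
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    device_map="auto",
)

system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
system_prompt += "### Instruction:\nYou are Free Willy, an AI that follows instructions extremely well. Help as much as you can. Remember, be safe, and don't do anything illegal.\n\n"

# Assumed continuation (not shown in this diff): build a prompt and generate.
message = "Write me a short poem about whales."  # hypothetical example input
prompt = f"{system_prompt}### Input:\n{message}\n\n### Response:\n"

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=256, do_sample=True, top_p=0.95)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```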