Update utils.py
utils.py
@@ -78,14 +78,14 @@ def generate_prompt_with_history(text, history, tokenizer, max_length=2048):
 #model = AutoModelForCausalLM.from_pretrained("dbmdz/bert-base-italian-cased")
 #tokenizer = AutoTokenizer.from_pretrained("asi/gpt-fr-cased-small")
 #model = AutoModelForCausalLM.from_pretrained("asi/gpt-fr-cased-small")
-#tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-
-#model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-
-tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-
-model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-
+#tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
+#model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
+tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
+model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
 
 
 def load_tokenizer_and_model(base_model,load_8bit=False):
-    base_model = "EleutherAI/gpt-neo-
+    base_model = "EleutherAI/gpt-neo-1.3B"
     if torch.cuda.is_available():
         device = "cuda"
     else:
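For context, below is a minimal, self-contained sketch of the loader this diff touches, assuming the standard transformers API. Only the hardcoded base_model override, the torch.cuda.is_available() check, and the EleutherAI/gpt-neo-1.3B checkpoint are visible in the diff; the "cpu" fallback, the use of the load_8bit flag, and the return signature are assumptions added for illustration.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


def load_tokenizer_and_model(base_model, load_8bit=False):
    # The commit pins the checkpoint, overriding whatever base_model was passed in.
    base_model = "EleutherAI/gpt-neo-1.3B"
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"  # assumed fallback; the diff cuts off at the else branch

    tokenizer = AutoTokenizer.from_pretrained(base_model)
    if device == "cuda":
        model = AutoModelForCausalLM.from_pretrained(
            base_model,
            load_in_8bit=load_8bit,    # assumed wiring of the load_8bit flag; needs bitsandbytes
            torch_dtype=torch.float16,
            device_map="auto",         # requires accelerate to be installed
        )
    else:
        model = AutoModelForCausalLM.from_pretrained(base_model)

    model.eval()
    return tokenizer, model, device

Note that hardcoding base_model inside the function makes the parameter dead code; that may be a deliberate shortcut to pin this Space to gpt-neo-1.3B, but callers passing a different checkpoint will silently get 1.3B.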