Spaces:
Sleeping
Sleeping
IC4T
committed on
Commit
·
1162edb
1
Parent(s):
8df799e
commit
Browse files
- training/generate.py +2 -2
training/generate.py
CHANGED
@@ -33,13 +33,13 @@ def load_model_tokenizer_for_generate(
|
|
33 |
Returns:
|
34 |
Tuple[PreTrainedModel, PreTrainedTokenizer]: model and tokenizer
|
35 |
"""
|
36 |
-
|
37 |
# model = AutoModelForCausalLM.from_pretrained(
|
38 |
# pretrained_model_name_or_path, device_map="auto", trust_remote_code=True)#, cache_dir="/media/siiva/DataStore/LLMs/cache/dollyV2"
|
39 |
#)
|
40 |
model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, model_type='dolly-v2')
|
41 |
|
42 |
-
tokenizer = AutoTokenizer.from_pretrained('gpt2')
|
43 |
|
44 |
|
45 |
return model, tokenizer
|
|
|
33 |
Returns:
|
34 |
Tuple[PreTrainedModel, PreTrainedTokenizer]: model and tokenizer
|
35 |
"""
|
36 |
+
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, padding_side="left")#, cache_dir="/media/siiva/DataStore/LLMs/cache/dollyV2")
|
37 |
# model = AutoModelForCausalLM.from_pretrained(
|
38 |
# pretrained_model_name_or_path, device_map="auto", trust_remote_code=True)#, cache_dir="/media/siiva/DataStore/LLMs/cache/dollyV2"
|
39 |
#)
|
40 |
model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, model_type='dolly-v2')
|
41 |
|
42 |
+
# tokenizer = AutoTokenizer.from_pretrained('gpt2')
|
43 |
|
44 |
|
45 |
return model, tokenizer
|