Spaces · Runtime error
Commit 0b1fa72
Parent(s): 15e6683
Try with the original gptj checkpoint
app.py CHANGED

@@ -3,19 +3,20 @@ import torch
 from transformers import pipeline, GPTJForCausalLM, AutoModelForCausalLM
 from peft import LoraConfig, get_peft_model, PeftModel, PeftConfig
 
-config = PeftConfig.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es")
-model = AutoModelForCausalLM.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es", return_dict=True, load_in_8bit=True, device_map='auto')
-
-# # load
-#
-# config = AutoConfig.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es", name_or_path="adapter_model.bin")
-
-# load
-
-model = PeftModel.from_pretrained(model, "hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es")
+# config = PeftConfig.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es")
+# model = AutoModelForCausalLM.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es", return_dict=True, load_in_8bit=True, device_map='auto')
+
+# # load tokenizer
+# tokenizer = AutoTokenizer.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es")
+
+# # Load the Lora model
+# model = PeftModel.from_pretrained(model, "hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es")
+
+# # load fp 16 model
+model = GPTJForCausalLM.from_pretrained("bertin-project/bertin-gpt-j-6B")
+
+config = AutoConfig.from_pretrained("bertin-project/bertin-gpt-j-6B")
 
 # create pipeline
 pipe = pipeline("text-generation", model=model, config=config, tokenizer=tokenizer, device=0,)
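
Even after this change, the committed file would still crash at startup, which is consistent with the Space's "Runtime error" status: AutoConfig is called but never imported (only pipeline, GPTJForCausalLM, and AutoModelForCausalLM come from transformers), and tokenizer is passed to pipeline() without ever being defined. Below is a minimal runnable sketch of what the commit appears to intend; the AutoConfig and AutoTokenizer imports, the fp16 torch_dtype (suggested by the "# # load fp 16 model" comment), and loading the tokenizer from the same bertin-project/bertin-gpt-j-6B checkpoint are assumptions, not part of the committed file.

# Hypothetical corrected app.py for this commit -- a sketch, not the
# committed code. Assumed additions: AutoConfig/AutoTokenizer imports,
# a tokenizer loaded from the same checkpoint, and fp16 weights.
import torch
from transformers import pipeline, GPTJForCausalLM, AutoConfig, AutoTokenizer

# load fp16 model: half precision roughly halves the ~24 GB fp32
# footprint of a 6B-parameter model
model = GPTJForCausalLM.from_pretrained(
    "bertin-project/bertin-gpt-j-6B",
    torch_dtype=torch.float16,
)
config = AutoConfig.from_pretrained("bertin-project/bertin-gpt-j-6B")
tokenizer = AutoTokenizer.from_pretrained("bertin-project/bertin-gpt-j-6B")

# create pipeline (device=0 assumes a CUDA GPU is available)
pipe = pipeline(
    "text-generation",
    model=model,
    config=config,
    tokenizer=tokenizer,
    device=0,
)

print(pipe("Me llamo Clara y vivo en Madrid.", max_new_tokens=20)[0]["generated_text"])

Note the trade-off against the parent commit: the removed path loaded the base model in 8-bit (load_in_8bit=True, device_map='auto') and attached the NER LoRA adapter with PeftModel.from_pretrained, which needs roughly half the memory of a plain fp16 checkpoint; per the commit message, switching to the original GPT-J checkpoint presumably isolates whether the adapter loading is what breaks the Space.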