Commit 7a24b1b
Parent(s): 92dde49
fix tokenizer is not fast tokenizer issue
app.py CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 
-from transformers import
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import numpy as np
 from transformers_cfg.grammar_utils import IncrementalGrammarConstraint
 from transformers_cfg.generation.logits_process import GrammarConstrainedLogitsProcessor
@@ -9,7 +9,7 @@ MODEL_NAME = "gpt2"
 
 if __name__ == "__main__":
     # Define your model and your tokenizer
-    tokenizer =
+    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
     model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
     if tokenizer.pad_token_id is None:
         tokenizer.pad_token_id = tokenizer.eos_token_id
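For context, the change loads the tokenizer via AutoTokenizer.from_pretrained, which for gpt2 returns the fast (Rust-backed) tokenizer that transformers_cfg's grammar constraint expects. Below is a minimal sketch (not part of this commit) of how the fixed imports and tokenizer setup would typically be wired into grammar-constrained generation with transformers_cfg; the grammar string, prompt, and generation parameters are illustrative assumptions, not code from app.py.

# Sketch: grammar-constrained generation with the fast tokenizer loaded above.
# Grammar, prompt, and generation settings are assumed for illustration only.
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers_cfg.grammar_utils import IncrementalGrammarConstraint
from transformers_cfg.generation.logits_process import GrammarConstrainedLogitsProcessor

MODEL_NAME = "gpt2"

if __name__ == "__main__":
    # AutoTokenizer gives the fast tokenizer for gpt2, which the grammar
    # constraint relies on when mapping grammar symbols to token ids.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token_id = tokenizer.eos_token_id

    # Tiny GBNF-style grammar (assumed): restrict output to "yes" or "no".
    grammar_str = 'root ::= "yes" | "no"'
    grammar = IncrementalGrammarConstraint(grammar_str, "root", tokenizer)
    grammar_processor = GrammarConstrainedLogitsProcessor(grammar)

    prompt = "Is the sky blue? Answer:"
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(
        **inputs,
        max_new_tokens=5,
        logits_processor=[grammar_processor],
        pad_token_id=tokenizer.pad_token_id,
    )
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))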