Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ import torch
 from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config
 
 def generate(tokenizer, model, text, features):
-    generated = tokenizer("<|startoftext
+    generated = tokenizer("<|startoftext|><|titlestart|>{}<|titleend|><|authornamebegin|>".format(text), return_tensors="pt").input_ids
     sample_outputs = model.generate(
         generated, do_sample=True, top_k=50,
         max_length=features['max_length'], top_p=features['top_p'], temperature=features['t'] / 100.0, num_return_sequences=features['num'],
@@ -20,7 +20,10 @@ def generate(tokenizer, model, text, features):
 
 
 def load_model():
-
+    additional_special_tokens = ['<|titlestart|>', '<|titleend|>', '<|authornamebegin|>', '<|authornameend|>']
+    tokenizer = GPT2Tokenizer.from_pretrained('gpt2-medium', bos_token='<|startoftext|>',
+                                              eos_token='<|endoftext|>', pad_token='<|pad|>',
+                                              additional_special_tokens=additional_special_tokens)
     config = GPT2Config.from_json_file('./config.json')
     model = GPT2LMHeadModel(config)
     state_dict = torch.load('./pytorch_model.bin', map_location=torch.device('cpu'))
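For context, below is a minimal sketch of how the updated generate() and load_model() are expected to fit together as a standalone script. The special tokens, prompt template, sampling parameters, and local file paths (./config.json, ./pytorch_model.bin) come from the diff; the decoding step, the tail of load_model() (loading the state dict and returning both objects), and the example feature values are assumptions, not part of this commit.

import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config


def load_model():
    # Special tokens and base tokenizer name are taken from the diff above.
    additional_special_tokens = ['<|titlestart|>', '<|titleend|>', '<|authornamebegin|>', '<|authornameend|>']
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2-medium', bos_token='<|startoftext|>',
                                              eos_token='<|endoftext|>', pad_token='<|pad|>',
                                              additional_special_tokens=additional_special_tokens)
    config = GPT2Config.from_json_file('./config.json')  # local fine-tuned checkpoint files
    model = GPT2LMHeadModel(config)
    state_dict = torch.load('./pytorch_model.bin', map_location=torch.device('cpu'))
    model.load_state_dict(state_dict)  # assumption: the checkpoint was saved with the extended vocabulary
    model.eval()
    return tokenizer, model


def generate(tokenizer, model, text, features):
    # Prompt template from the diff: the title goes between <|titlestart|> and <|titleend|>.
    prompt = "<|startoftext|><|titlestart|>{}<|titleend|><|authornamebegin|>".format(text)
    generated = tokenizer(prompt, return_tensors="pt").input_ids
    sample_outputs = model.generate(
        generated, do_sample=True, top_k=50,
        max_length=features['max_length'], top_p=features['top_p'],
        temperature=features['t'] / 100.0, num_return_sequences=features['num'],
    )
    # Decoding is an assumption; the rest of generate() lies outside the hunks shown here.
    return [tokenizer.decode(output, skip_special_tokens=False) for output in sample_outputs]


if __name__ == '__main__':
    tokenizer, model = load_model()
    features = {'max_length': 64, 'top_p': 0.95, 't': 90, 'num': 3}  # example values only
    for sample in generate(tokenizer, model, "The Old Man and the Sea", features):
        print(sample)

One design note: adding special tokens to a tokenizer normally requires the model's embedding matrix to match (e.g. via resize_token_embeddings()); the sketch assumes the fine-tuned config.json and pytorch_model.bin were saved with the extended vocabulary, which is why no resizing happens at load time.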