from transformers import BertTokenizer, BertForQuestionAnswering
import torch

# Load a BERT model fine-tuned on SQuAD for extractive question answering.
model_path = "bert-large-uncased-whole-word-masking-finetuned-squad"
tokenizer = BertTokenizer.from_pretrained(model_path)
model = BertForQuestionAnswering.from_pretrained(model_path)

def get_answer(question, context):
    """Answers a question using BERT on the given context."""
    inputs = tokenizer(question, context, return_tensors="pt", truncation=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
    # Pick the most likely start/end token positions for the answer span.
    start = torch.argmax(outputs.start_logits)
    end = torch.argmax(outputs.end_logits) + 1
    # Decode the selected token span back into a readable string.
    return tokenizer.convert_tokens_to_string(
        tokenizer.convert_ids_to_tokens(inputs["input_ids"][0][start:end])
    )
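
# Minimal usage sketch (not part of the original file): the question and
# context below are hypothetical, shown only to illustrate how get_answer
# extracts an answer span from the provided context.
if __name__ == "__main__":
    context = "The Eiffel Tower is located in Paris and was completed in 1889."
    question = "When was the Eiffel Tower completed?"
    print(get_answer(question, context))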