Update app.py
app.py CHANGED
@@ -2,14 +2,14 @@ import gradio as gr
 from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, TapasForQuestionAnswering, TapasTokenizer
 
 # Load the models and tokenizers
-tapas_model_name = "
+tapas_model_name = "microsoft/tapex-large-finetuned-wtq"
 dialogpt_model_name = "microsoft/DialoGPT-medium"
 
 tapas_tokenizer = TapasTokenizer.from_pretrained(tapas_model_name)
-tapas_model =
+tapas_model = BartForConditionalGeneration.from_pretrained(tapas_model_name)
 
 dialogpt_tokenizer = AutoTokenizer.from_pretrained(dialogpt_model_name)
-dialogpt_model =
+dialogpt_model = AutoModelForSeqCausalLM.from_pretrained(dialogpt_model_name)
 
 def answer_table_question(table, question):
     encoding = tapas_tokenizer(table=table, query=question, return_tensors="pt")
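
As committed, the added lines will not run: `AutoModelForSeqCausalLM` is not a transformers class (DialoGPT loads through `AutoModelForCausalLM`), `BartForConditionalGeneration` is never imported, and `microsoft/tapex-large-finetuned-wtq` is a TAPEX (BART-based) checkpoint, so `TapasTokenizer.from_pretrained` on it will fail; TAPEX ships with its own `TapexTokenizer`. Below is a minimal corrected sketch that keeps the diff's variable names; the `generate`/`batch_decode` completion of `answer_table_question` is an assumption based on standard TAPEX usage, since the rest of app.py is not shown.

```python
import pandas as pd
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BartForConditionalGeneration,
    TapexTokenizer,
)

# TAPEX checkpoints are BART-based, so they pair with TapexTokenizer and
# BartForConditionalGeneration rather than the TAPAS classes.
tapas_model_name = "microsoft/tapex-large-finetuned-wtq"
tapas_tokenizer = TapexTokenizer.from_pretrained(tapas_model_name)
tapas_model = BartForConditionalGeneration.from_pretrained(tapas_model_name)

# DialoGPT is a decoder-only (GPT-2-style) model, so it loads via
# AutoModelForCausalLM, not the nonexistent AutoModelForSeqCausalLM.
dialogpt_model_name = "microsoft/DialoGPT-medium"
dialogpt_tokenizer = AutoTokenizer.from_pretrained(dialogpt_model_name)
dialogpt_model = AutoModelForCausalLM.from_pretrained(dialogpt_model_name)

def answer_table_question(table: pd.DataFrame, question: str) -> str:
    # TapexTokenizer linearizes the DataFrame together with the query;
    # the answer comes from seq2seq generation, not cell selection.
    encoding = tapas_tokenizer(table=table, query=question, return_tensors="pt")
    outputs = tapas_model.generate(**encoding)
    return tapas_tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
```

Note that `TapexTokenizer` expects the table as a pandas DataFrame with string-valued cells, e.g. `answer_table_question(pd.DataFrame({"city": ["Paris", "Rome"], "country": ["France", "Italy"]}), "which city is in France?")`. With the TAPAS classes replaced, the `AutoModelForSeq2SeqLM` and `TapasForQuestionAnswering` imports on the unchanged context line become unused and could be dropped.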