teaevo committed on
Commit
0343785
·
1 Parent(s): 0f46be8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -2,14 +2,14 @@ import gradio as gr
2
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, TapasForQuestionAnswering, TapasTokenizer
3
 
4
  # Load the models and tokenizers
5
- tapas_model_name = "google/tapas-large-finetuned-wtq"
6
  dialogpt_model_name = "microsoft/DialoGPT-medium"
7
 
8
  tapas_tokenizer = TapasTokenizer.from_pretrained(tapas_model_name)
9
- tapas_model = TapasForQuestionAnswering.from_pretrained(tapas_model_name)
10
 
11
  dialogpt_tokenizer = AutoTokenizer.from_pretrained(dialogpt_model_name)
12
- dialogpt_model = AutoModelForSeq2SeqLM.from_pretrained(dialogpt_model_name)
13
 
14
  def answer_table_question(table, question):
15
  encoding = tapas_tokenizer(table=table, query=question, return_tensors="pt")
 
2
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, TapasForQuestionAnswering, TapasTokenizer
3
 
4
  # Load the models and tokenizers
5
+ tapas_model_name = "microsoft/tapex-large-finetuned-wtq"
6
  dialogpt_model_name = "microsoft/DialoGPT-medium"
7
 
8
  tapas_tokenizer = TapasTokenizer.from_pretrained(tapas_model_name)
9
+ tapas_model = BartForConditionalGeneration.from_pretrained(tapas_model_name)
10
 
11
  dialogpt_tokenizer = AutoTokenizer.from_pretrained(dialogpt_model_name)
12
+ dialogpt_model = AutoModelForSeqCausalLM.from_pretrained(dialogpt_model_name)
13
 
14
  def answer_table_question(table, question):
15
  encoding = tapas_tokenizer(table=table, query=question, return_tensors="pt")