teaevo committed
Commit 54210ca · 1 Parent(s): abad0fd

Update app.py

Files changed (1)
  app.py +17 -8
app.py CHANGED
@@ -1,15 +1,24 @@
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import TapexTokenizer, BartForConditionalGeneration
+import pandas as pd
 
-def chatbot_response(user_message):
-    model_name = "gpt2" # You can change this to any other model from the list above
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(model_name)
+model_name = "microsoft/tapex-large-sql-execution" # You can change this to any other model from the list above
+tokenizer = TapexTokenizer.from_pretrained(model_name)
+model = BartForConditionalGeneration.from_pretrained(model_name)
 
-    inputs = tokenizer.encode("User: " + user_message, return_tensors="pt")
-    outputs = model.generate(inputs, max_length=100, num_return_sequences=1)
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+data = {
+    "year": [1896, 1900, 1904, 2004, 2008, 2012],
+    "city": ["athens", "paris", "st. louis", "athens", "beijing", "london"]
+}
+table = pd.DataFrame.from_dict(data)
 
+def chatbot_response(user_message):
+
+    inputs = tokenizer.encode("User: " + user_message, return_tensors="pt")
+    encoding = tokenizer(table=table, query=inputs, return_tensors="pt")
+    outputs = model.generate(**encoding)
+    response = tokenizer.batch_decode(outputs, skip_special_tokens=True)
+
     return response
 
 # Define the chatbot interface using Gradio
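A note on the committed chatbot_response: TapexTokenizer expects query to be a plain string, so the tokenizer.encode call carried over from the GPT-2 version produces a tensor the second tokenizer call cannot consume, and batch_decode returns a list of strings rather than a string. Below is a minimal runnable sketch of the intended flow; the Gradio wiring at the end is an assumption, since the commit does not show how app.py defines the interface.

import gradio as gr
import pandas as pd
from transformers import TapexTokenizer, BartForConditionalGeneration

model_name = "microsoft/tapex-large-sql-execution"
tokenizer = TapexTokenizer.from_pretrained(model_name)
model = BartForConditionalGeneration.from_pretrained(model_name)

# Toy table the checkpoint executes SQL-like queries against.
data = {
    "year": [1896, 1900, 1904, 2004, 2008, 2012],
    "city": ["athens", "paris", "st. louis", "athens", "beijing", "london"]
}
table = pd.DataFrame.from_dict(data)

def chatbot_response(user_message):
    # Pass the query as a raw string; no tokenizer.encode step is needed.
    encoding = tokenizer(table=table, query=user_message, return_tensors="pt")
    outputs = model.generate(**encoding)
    # batch_decode returns a list; return the first (only) answer string.
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]

# Define the chatbot interface using Gradio (assumed wiring, not shown in the diff)
demo = gr.Interface(fn=chatbot_response, inputs="text", outputs="text")
demo.launch()

This checkpoint executes SQL-like queries, e.g. "select year where city = beijing" over the table above should return "2008" per the model card example, so the chat-style "User: " prefix from the old GPT-2 prompt is best dropped as well.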