import gradio as gr
import pandas as pd
from transformers import TapexTokenizer, BartForConditionalGeneration

# TAPEX is a BART-based seq2seq model: it generates the answer obtained by
# executing a SQL-like query against a table, so it needs a table plus a
# conditional-generation head rather than an extractive QA head.
model_name = "microsoft/tapex-large-sql-execution"  # TAPEX large SQL execution model
tokenizer = TapexTokenizer.from_pretrained(model_name)
model = BartForConditionalGeneration.from_pretrained(model_name)

# Placeholder demo table; replace with the table the queries should run against.
table = pd.DataFrame({"year": ["1896", "1900", "2004"], "city": ["athens", "paris", "athens"]})


def execute_sql(user_query):
    encoding = tokenizer(table=table, query=user_query, return_tensors="pt")
    outputs = model.generate(**encoding)
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
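
# Usage sketch (hypothetical, using the placeholder `table` above). TAPEX is
# pre-trained on uncased text, so lowercase SQL-like queries work best, e.g.:
#     print(execute_sql("select city where year = 2004"))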
'''
def chatbot_response(user_message):
    # Your chatbot code goes here (using GPT-2 or any other text generation model).
    # For example, you can use the GPT-2 code from the previous responses.
    return chatbot_generated_response

# Define the chatbot interface using Gradio
chatbot_interface = gr.Interface(
    fn=chatbot_response,
    inputs=gr.Textbox(label="You:"),
    outputs=gr.Textbox(),
    live=True,
    title="Chatbot",
    description="Type your message in the box above, and the chatbot will respond.",
)
'''
# Define the SQL execution interface using Gradio
sql_execution_interface = gr.Interface(
    fn=execute_sql,
    inputs=gr.Textbox(label="Enter your SQL query:"),
    outputs=gr.Textbox(),
    live=True,
    title="SQL Execution",
    description="Type your SQL query in the box above, and the model will execute it against the demo table.",
)
# Combine the chatbot and SQL execution interfaces (enable once chatbot_interface is defined)
# combined_interface = gr.TabbedInterface([chatbot_interface, sql_execution_interface], ["Chatbot", "SQL Execution"])
# Launch the SQL execution interface
if __name__ == "__main__":
    sql_execution_interface.launch()