import gradio as gr
import pandas as pd
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the TAPEX SQL-execution model once at module level so each request
# reuses the same weights instead of reloading them on every call.
model_name = "microsoft/tapex-large-sql-execution"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# TAPEX executes SQL against a table, so this demo uses a small sample table;
# swap in your own DataFrame as needed.
SAMPLE_TABLE = pd.DataFrame({
    "year": [1896, 1900, 1904, 2004, 2008, 2012],
    "city": ["athens", "paris", "st. louis", "athens", "beijing", "london"],
})


def execute_sql(user_query):
    # The TAPEX tokenizer encodes the table and the SQL query together.
    inputs = tokenizer(table=SAMPLE_TABLE, query=user_query, return_tensors="pt", padding=True)
    outputs = model.generate(inputs["input_ids"], attention_mask=inputs["attention_mask"], max_length=1024)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
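
# Quick sanity check against the sample table, mirroring the example on the
# TAPEX model card: execute_sql("select year where city = beijing") is
# expected to return "2008".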

'''
def chatbot_response(user_message):
    # Your chatbot code goes here (using GPT-2 or any other text generation model).
    # For example, you can reuse the GPT-2 code from the previous responses.
    return chatbot_generated_response


# Define the chatbot interface using Gradio
chatbot_interface = gr.Interface(
    fn=chatbot_response,
    inputs=gr.Textbox(label="You:"),
    outputs=gr.Textbox(),
    live=True,
    title="Chatbot",
    description="Type your message in the box above, and the chatbot will respond.",
)
'''

# Define the SQL execution interface using Gradio
sql_execution_interface = gr.Interface(
    fn=execute_sql,
    inputs=gr.Textbox(label="Enter your SQL query:"),
    outputs=gr.Textbox(),
    live=True,
    title="SQL Execution",
    description="Type a SQL query over the sample table, and the TAPEX model will execute it.",
)


if __name__ == "__main__":
    sql_execution_interface.launch()