import gradio as gr

# from huggingface_hub import InferenceClient
#
# """
# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
#
#
# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [{"role": "system", "content": system_message}]
#
#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})
#
#     messages.append({"role": "user", "content": message})
#
#     response = ""
#
#     for message in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = message.choices[0].delta.content
#         response += token
#         yield response
#
#
# """
# For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
# """
# demo = gr.ChatInterface(
#     respond,
#     additional_inputs=[
#         gr.Textbox(value="You are a friendly SQL Chatbot.", label="System message"),
#         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
#         gr.Slider(
#             minimum=0.1,
#             maximum=1.0,
#             value=0.95,
#             step=0.05,
#             label="Top-p (nucleus sampling)",
#         ),
#     ],
# )
#
#
# if __name__ == "__main__":
#     demo.launch()


# Import necessary libraries
import gradio as gr

# Define the prompt template
odoo_text2sql_prompt = """
Instruction: {instruction}

Input: {input_text}

Output: {output_text}

DB Schema: {db_schema}
"""

# Define the database schema
db_schema = """
CREATE TABLE product_product (
    id SERIAL NOT NULL,
    message_main_attachment_id INTEGER,
    product_tmpl_id INTEGER NOT NULL,
    create_uid INTEGER,
    write_uid INTEGER,
    default_code VARCHAR,
    barcode VARCHAR,
    combination_indices VARCHAR,
    volume NUMERIC,
    weight NUMERIC,
    active BOOLEAN,
    can_image_variant_1024_be_zoomed BOOLEAN,
    create_date TIMESTAMP WITHOUT TIME ZONE,
    write_date TIMESTAMP WITHOUT TIME ZONE,
    store_qty_available DOUBLE PRECISION,
    store_standard_price DOUBLE PRECISION,
    store_sales_count DOUBLE PRECISION,
    CONSTRAINT product_product_pkey PRIMARY KEY (id),
    CONSTRAINT product_product_create_uid_fkey FOREIGN KEY(create_uid) REFERENCES res_users (id) ON DELETE SET NULL,
    CONSTRAINT product_product_message_main_attachment_id_fkey FOREIGN KEY(message_main_attachment_id) REFERENCES ir_attachment (id) ON DELETE SET NULL,
    CONSTRAINT product_product_product_tmpl_id_fkey FOREIGN KEY(product_tmpl_id) REFERENCES product_template (id) ON DELETE CASCADE,
    CONSTRAINT product_product_write_uid_fkey FOREIGN KEY(write_uid) REFERENCES res_users (id) ON DELETE SET NULL
)
"""


# Function to generate SQL query (placeholder function)
def generate_sql(instruction, input_text):
    return "Model is not loaded. Please ensure you have the necessary GPU resources."
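
# Hedged sketch (not the original implementation): if a GPU and a fine-tuned
# text-to-SQL checkpoint were available, generate_sql could fill in
# odoo_text2sql_prompt and run it through a `transformers` text-generation
# pipeline instead of returning the placeholder string. The checkpoint name
# below is a hypothetical stand-in, not a model referenced by this app.
#
# from transformers import pipeline
#
# text2sql_pipe = pipeline("text-generation", model="your-org/odoo-text2sql")  # hypothetical checkpoint
#
#
# def generate_sql(instruction, input_text):
#     prompt = odoo_text2sql_prompt.format(
#         instruction=instruction,
#         input_text=input_text,
#         output_text="",  # left empty; the model is expected to produce the SQL
#         db_schema=db_schema,
#     )
#     result = text2sql_pipe(prompt, max_new_tokens=256, return_full_text=False)
#     return result[0]["generated_text"].strip()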

# Function to clear inputs
def clear_inputs():
    return "", ""


# Create the Gradio interface with enhanced features
with gr.Blocks(css="""
    .centered {
        display: flex;
        justify-content: center;
        align-items: center;
        text-align: center;
    }
    .title {
        font-size: 2em;
        font-weight: bold;
        margin-bottom: 20px;
    }
    .description {
        font-size: 1.2em;
        margin-bottom: 20px;
    }
    .button {
        background-color: #007BFF; /* Sea blue color */
        color: white;
        border: none;
        padding: 10px 20px;
        text-align: center;
        text-decoration: none;
        display: inline-block;
        font-size: 16px;
        margin: 4px 2px;
        cursor: pointer;
        border-radius: 12px;
    }
    .button:hover {
        background-color: #0056b3;
    }
""") as demo:
    gr.Markdown('