"""Gradio chatbot UI for the GPT-4.5 Preview model: text, image-URL, uploaded-image,
and PDF conversations, each in its own tab with its own history."""

import gradio as gr
import openai
import fitz  # PyMuPDF, used to pull text out of uploaded PDFs
import base64

# OpenAI API key supplied by the user through the UI at runtime.
api_key = ""


def set_api_key(key):
    global api_key
    api_key = key
    return "API Key Set Successfully!"


def query_openai(messages, temperature, top_p, max_output_tokens):
    """Send the accumulated message history to the Chat Completions API and
    return (chatbot value, updated history)."""
    if not api_key:
        return messages + [{"role": "assistant", "content": "Please enter your OpenAI API key first."}], messages

    try:
        # openai>=1.0 client-style call; the key set via the UI is passed per request.
        client = openai.OpenAI(api_key=api_key)

        # Fall back to the defaults only when a value is missing, so an explicit 0 is respected.
        temperature = float(temperature) if temperature is not None else 1.0
        top_p = float(top_p) if top_p is not None else 1.0
        max_output_tokens = int(max_output_tokens) if max_output_tokens else 2048

        response = client.chat.completions.create(
            model="gpt-4.5-preview",
            messages=messages,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_output_tokens
        )

        bot_response = response.choices[0].message.content
        messages.append({"role": "assistant", "content": bot_response})

        return messages, messages

    except Exception as e:
        return messages + [{"role": "assistant", "content": f"Error: {e}"}], messages


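# For reference, a direct call would look like this (illustrative values, not wired into the UI):
#   history = [{"role": "user", "content": "Hello"}]
#   chat_value, history = query_openai(history, temperature=0.7, top_p=1.0, max_output_tokens=256)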
def image_url_chat(image_url, text_query, messages, temperature, top_p, max_output_tokens):
    if not image_url or not text_query:
        return messages + [{"role": "assistant", "content": "Please provide an image URL and a query."}], messages

    # Pass the image by reference as an image_url content part alongside the text query.
    messages.append({"role": "user", "content": [
        {"type": "image_url", "image_url": {"url": image_url}},
        {"type": "text", "text": text_query}
    ]})
    return query_openai(messages, temperature, top_p, max_output_tokens)


def text_chat(text_query, messages, temperature, top_p, max_output_tokens):
    if not text_query:
        return messages + [{"role": "assistant", "content": "Please enter a query."}], messages

    messages.append({"role": "user", "content": text_query})
    return query_openai(messages, temperature, top_p, max_output_tokens)


def image_chat(image_file, text_query, messages, temperature, top_p, max_output_tokens):
    if image_file is None or not text_query:
        return messages + [{"role": "assistant", "content": "Please upload an image and provide a query."}], messages

    # Inline the uploaded file as a base64 data URL (MIME type hard-coded to JPEG).
    with open(image_file, "rb") as img:
        base64_image = base64.b64encode(img.read()).decode("utf-8")

    image_data = f"data:image/jpeg;base64,{base64_image}"

    messages.append({"role": "user", "content": [
        {"type": "image_url", "image_url": {"url": image_data}},
        {"type": "text", "text": text_query}
    ]})
    return query_openai(messages, temperature, top_p, max_output_tokens)


def pdf_chat(pdf_file, text_query, messages, temperature, top_p, max_output_tokens):
    if pdf_file is None or not text_query:
        return messages + [{"role": "assistant", "content": "Please upload a PDF and provide a query."}], messages

    # Extract plain text from the first five pages only, to keep the prompt size bounded.
    doc = fitz.open(pdf_file)
    text = "\n".join([page.get_text("text") for page in doc][:5])
    doc.close()

    messages.append({"role": "user", "content": [
        {"type": "text", "text": text},
        {"type": "text", "text": text_query}
    ]})
    return query_openai(messages, temperature, top_p, max_output_tokens)


def clear_chat():
    # One reset value per component in clear_button.click(...)'s outputs list, in the same order.
    return [], [], [], [], "", "", [], "", [], None, "", [], None, "", [], 1.0, 1.0, 2048


with gr.Blocks() as demo:
    gr.Markdown("## GPT-4.5 Preview Conversational Chatbot")

    with gr.Row():
        api_key_input = gr.Textbox(label="Enter OpenAI API Key", type="password")
        api_key_button = gr.Button("Set API Key")
        api_key_output = gr.Textbox(label="API Key Status", interactive=False)

    with gr.Row():
        temperature = gr.Slider(0, 2, value=1.0, step=0.1, label="Temperature")
        top_p = gr.Slider(0, 1, value=1.0, step=0.1, label="Top-P")
        max_output_tokens = gr.Slider(0, 16384, value=2048, step=512, label="Max Output Tokens")

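    # The four tabs below all funnel into query_openai(); they differ only in how the
    # user turn is assembled (plain text, image URL, uploaded image, or PDF text), and
    # they all share the sampling sliders above.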
    with gr.Tabs():
        with gr.Tab("Image URL Chat"):
            image_url = gr.Textbox(label="Enter Image URL")
            image_query = gr.Textbox(label="Ask about the Image")
            image_url_output = gr.Chatbot(label="Conversation History", type="messages", elem_id="chatbot1")
            image_url_button = gr.Button("Ask")

        with gr.Tab("Text Chat"):
            text_query = gr.Textbox(label="Enter your query")
            text_output = gr.Chatbot(label="Conversation History", type="messages", elem_id="chatbot2")
            text_button = gr.Button("Ask")

        with gr.Tab("Image Chat"):
            image_upload = gr.File(label="Upload an Image", type="filepath")
            image_text_query = gr.Textbox(label="Ask about the uploaded image")
            image_output = gr.Chatbot(label="Conversation History", type="messages", elem_id="chatbot3")
            image_button = gr.Button("Ask")

        with gr.Tab("PDF Chat"):
            pdf_upload = gr.File(label="Upload a PDF", type="filepath")
            pdf_text_query = gr.Textbox(label="Ask about the uploaded PDF")
            pdf_output = gr.Chatbot(label="Conversation History", type="messages", elem_id="chatbot4")
            pdf_button = gr.Button("Ask")

    clear_button = gr.Button("Clear Chat")

    # Per-tab conversation histories, kept as OpenAI-style message dicts in gr.State.
    image_url_chat_history = gr.State([])
    text_chat_history = gr.State([])
    image_chat_history = gr.State([])
    pdf_chat_history = gr.State([])

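    # Wire each "Ask" button to its handler; every handler receives that tab's history
    # plus the shared sliders and returns (chatbot value, updated history).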
    api_key_button.click(set_api_key, inputs=[api_key_input], outputs=[api_key_output])
    image_url_button.click(image_url_chat, [image_url, image_query, image_url_chat_history, temperature, top_p, max_output_tokens], [image_url_output, image_url_chat_history])
    text_button.click(text_chat, [text_query, text_chat_history, temperature, top_p, max_output_tokens], [text_output, text_chat_history])
    image_button.click(image_chat, [image_upload, image_text_query, image_chat_history, temperature, top_p, max_output_tokens], [image_output, image_chat_history])
    pdf_button.click(pdf_chat, [pdf_upload, pdf_text_query, pdf_chat_history, temperature, top_p, max_output_tokens], [pdf_output, pdf_chat_history])

    clear_button.click(
        clear_chat,
        outputs=[
            image_url_chat_history, text_chat_history, image_chat_history, pdf_chat_history,
            image_url, image_query, image_url_output,
            text_query, text_output,
            image_upload, image_text_query, image_output,
            pdf_upload, pdf_text_query, pdf_output,
            temperature, top_p, max_output_tokens
        ]
    )


if __name__ == "__main__":
    demo.launch()
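# demo.launch() serves locally on http://127.0.0.1:7860 by default; passing
# share=True would additionally create a temporary public Gradio link if needed.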