import os

import google.generativeai as genai
import gradio as gr

# Configure the Gemini API key from the environment (raises KeyError if unset).
genai.configure(api_key=os.environ["GEMINI_API_KEY"])

# Generation parameters for the model.
generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    "top_k": 64,
    "max_output_tokens": 8192,
    "response_mime_type": "text/plain",
}

model = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    generation_config=generation_config,
    # safety_settings can be adjusted
    # See https://ai.google.dev/gemini-api/docs/safety-settings
)


def chat_with_model(user_input, history):
    """Send one user turn to Gemini and return the updated conversation.

    Args:
        user_input: The user's new message text.
        history: Accumulated turns as ``{"role": ..., "content": ...}``
            dicts, alternating "user"/"assistant".

    Returns:
        A ``(messages, history)`` pair: ``messages`` is a list of
        ``(user_msg, assistant_msg)`` tuples for ``gr.Chatbot``;
        ``history`` is the updated state list.
    """
    # Convert stored turns to the google-generativeai chat format:
    # role must be "user" or "model" (not "assistant"), and the text
    # must live under "parts", not "content".
    api_history = [
        {
            "role": "model" if turn["role"] == "assistant" else "user",
            "parts": [turn["content"]],
        }
        for turn in history
    ]

    # Start the session from *prior* turns only. send_message() appends the
    # new user message itself — putting user_input into the history first
    # would send it to the model twice.
    chat_session = model.start_chat(history=api_history)
    response = chat_session.send_message(user_input)

    # Record the completed exchange in our own history format.
    history.append({"role": "user", "content": user_input})
    history.append({"role": "assistant", "content": response.text})

    # Pair up (user, assistant) turns for display in the Gradio Chatbot.
    messages = [
        (
            history[i]["content"],
            history[i + 1]["content"] if i + 1 < len(history) else "",
        )
        for i in range(0, len(history), 2)
    ]
    return messages, history


# Build the Gradio UI.
with gr.Blocks() as demo:
    gr.Markdown("# Chat with Gemini Model")
    chatbot = gr.Chatbot()
    state = gr.State([])  # per-session conversation history
    with gr.Row():
        # .style() was removed in Gradio 4.x; container is now a
        # constructor argument.
        user_input = gr.Textbox(
            show_label=False,
            placeholder="Type your message and press Enter",
            container=False,
        )
        send_btn = gr.Button("Send")

    send_btn.click(chat_with_model, [user_input, state], [chatbot, state])
    user_input.submit(chat_with_model, [user_input, state], [chatbot, state])
    # Clear the textbox after sending.
    send_btn.click(lambda: "", None, user_input)
    user_input.submit(lambda: "", None, user_input)

demo.launch()