# Streamlit Gemini chatbot (Hugging Face Space).
# NOTE(review): the Space status page reported "Runtime error" at the time
# this source was captured.
import base64
import io
import os

import google.generativeai as genai
import streamlit as st
from PIL import Image
# --- Gemini API configuration ---
# SECURITY: never commit a real API key in source (the original hard-coded
# one here). Read it from the environment instead; in a deployed Streamlit
# app, st.secrets["GOOGLE_API_KEY"] is the equivalent secure store.
api_key = os.environ.get("GOOGLE_API_KEY", "")
genai.configure(api_key=api_key)

# Shared generation settings for both the text and the vision model.
generation_config = genai.GenerationConfig(
    temperature=0.9,         # fairly creative responses
    max_output_tokens=3000,  # cap on reply length
)

# Empty list -> fall back to the API's default safety thresholds.
safety_settings = []
# --- Chat state ---
# Streamlit reruns the whole script on every interaction, so the running
# transcript is kept in session_state.
st.session_state.setdefault('chat_history', [])

# --- Page layout ---
st.title("Gemini Chatbot")

# Render the transcript accumulated so far.
for role, text in st.session_state['chat_history']:
    st.markdown(f"**{role.title()}**: {text}")

# Multiline box for the user's next message.
user_input = st.text_area("Enter your message here:", key="user_input")

# Optional image attachments for the vision model.
uploaded_files = st.file_uploader(
    "Upload images:",
    type=["png", "jpg", "jpeg"],
    accept_multiple_files=True,
    key="file_uploader",
)
def get_image_base64(image):
    """Encode a PIL image as a base64 JPEG string.

    JPEG cannot store an alpha channel, so RGBA/LA/P images (typical for
    uploaded PNGs) are converted to RGB first — the original implementation
    raised "cannot write mode RGBA as JPEG" on such inputs.

    Args:
        image: a PIL.Image.Image (or compatible object with .mode/.save).

    Returns:
        str: base64-encoded JPEG bytes, ASCII-decoded.
    """
    if image.mode not in ("RGB", "L"):
        # Drop alpha / palette so JPEG encoding cannot fail.
        image = image.convert("RGB")
    buffered = io.BytesIO()
    image.save(buffered, format="JPEG")
    return base64.b64encode(buffered.getvalue()).decode()
def _ask_gemini(model_name, contents):
    """Send `contents` to the named Gemini model and return the reply text.

    Blocked or empty candidates expose no `.text` attribute; in that case a
    placeholder string is returned instead of raising.
    """
    model = genai.GenerativeModel(
        model_name=model_name,
        generation_config=generation_config,
        safety_settings=safety_settings,
    )
    response = model.generate_content(contents)
    return response.text if hasattr(response, "text") else "No response text found."


# When the 'Send' button is clicked, process the input and generate a response.
if st.button("Send", key="send_button"):
    if user_input or uploaded_files:
        history = st.session_state['chat_history']
        # Everything already in the history was rendered above the input box
        # earlier in this run; remember the boundary so only the *new*
        # messages are rendered below.  (The original re-rendered the whole
        # transcript here, duplicating every prior message on screen.)
        prior_len = len(history)

        if user_input:
            history.append(("User", user_input))

        if uploaded_files:
            # Vision request: gemini-pro-vision is single-turn, so every
            # image part — plus any accompanying text — must be bundled
            # into ONE user content object, not one content per image.
            parts = []
            for uploaded_file in uploaded_files:
                image = Image.open(io.BytesIO(uploaded_file.read()))
                parts.append({
                    "mime_type": uploaded_file.type,
                    "data": get_image_base64(image),
                })
                history.append(("User", f"Uploaded image: {uploaded_file.name}"))
            if user_input:
                # The original silently dropped the typed text when images
                # were attached; include it in the same prompt instead.
                parts.append({"text": user_input})
            reply = _ask_gemini(
                'gemini-pro-vision',
                [{"role": "user", "parts": parts}],
            )
            history.append(("Gemini", reply))
        elif user_input:
            reply = _ask_gemini(
                'gemini-pro',
                [{"role": "user", "parts": [{"text": user_input}]}],
            )
            history.append(("Gemini", reply))

        # NOTE: the original assigned st.session_state['user_input'] = ""
        # here to clear the box.  Streamlit forbids mutating a widget's key
        # after that widget has been instantiated and raises
        # StreamlitAPIException — the app's "Runtime error".  Clearing must
        # instead be done in an on_click callback or by wrapping the inputs
        # in st.form(clear_on_submit=True).

        # Render only the messages produced by this turn.
        for role, text in history[prior_len:]:
            st.markdown(f"**{role.title()}**: {text}")