import streamlit as st
from PIL import Image
import io
import base64
# Google Generative AI SDK
import google.generativeai as genai
# Configure the API key (use an environment variable or secure storage in production)
api_key = "YOUR_API_KEY"  # Replace with your actual API key
genai.configure(api_key=api_key)
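# A safer alternative (a sketch, assuming a GOOGLE_API_KEY entry exists in
# .streamlit/secrets.toml or in the environment):
#   import os
#   api_key = st.secrets.get("GOOGLE_API_KEY", os.getenv("GOOGLE_API_KEY", ""))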
generation_config = genai.GenerationConfig(
    temperature=0.9,
    max_output_tokens=3000
)
safety_settings = []  # An empty list falls back to the API's default safety settings
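# To set blocking thresholds explicitly, safety_settings can instead be a list of
# category/threshold pairs (a sketch; the category and threshold names are taken
# from the google-generativeai documentation and should be verified):
#   safety_settings = [
#       {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"},
#       {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_ONLY_HIGH"},
#   ]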
# Initialize session state for chat history
if 'chat_history' not in st.session_state:
    st.session_state['chat_history'] = []
# UI layout
st.title("Gemini Chatbot")
# Function to convert an image to a base64-encoded JPEG string
def get_image_base64(image):
    image = image.convert("RGB")  # Convert to RGB to remove alpha channel if present
    buffered = io.BytesIO()
    image.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue()).decode()
    return img_str
# Callback function to send the message and clear the input box
def send_message():
    user_input = st.session_state['user_input']
    uploaded_files = st.session_state['file_uploader']
    if user_input or uploaded_files:
        # Save user input to the chat history
        if user_input:
            st.session_state['chat_history'].append(("User", user_input))
        # Text-only input goes to the text model
        if user_input and not uploaded_files:
            prompts = [{"role": "user", "parts": [{"text": user_input}]}]
            model = genai.GenerativeModel(
                model_name='gemini-pro',
                generation_config=generation_config,
                safety_settings=safety_settings
            )
            response = model.generate_content(prompts)
            try:
                response_text = response.text
            except (ValueError, AttributeError):
                # Accessing response.text raises if the response was blocked or empty
                response_text = "No response text found."
            st.session_state['chat_history'].append(("Gemini", response_text))
        # Uploaded images (optionally together with the text) go to the vision model
        if uploaded_files:
            for uploaded_file in uploaded_files:
                image = Image.open(uploaded_file).convert("RGB")  # Ensure image is in RGB
                image_base64 = get_image_base64(image)
                st.session_state['chat_history'].append(("User", f"Uploaded image: {uploaded_file.name}"))
                # The image is re-encoded as JPEG in get_image_base64, so use that MIME type
                parts = [{"mime_type": "image/jpeg", "data": image_base64}]
                if user_input:
                    parts.insert(0, {"text": user_input})
                image_prompt = {"role": "user", "parts": parts}
                model = genai.GenerativeModel(
                    model_name='gemini-pro-vision',
                    generation_config=generation_config,
                    safety_settings=safety_settings
                )
                response = model.generate_content([image_prompt])
                try:
                    response_text = response.text
                except (ValueError, AttributeError):
                    response_text = "No response text found."
                st.session_state['chat_history'].append(("Gemini", response_text))
        # Clear the input box after sending the message (allowed here because the
        # callback runs before the next script rerun)
        st.session_state['user_input'] = ""
# Multiline text input for the user to send messages
# (no default value is passed, so the callback can safely reset the key)
user_input = st.text_area("Enter your message here:", key="user_input")
# File uploader for images
uploaded_files = st.file_uploader(
    "Upload images:",
    type=["png", "jpg", "jpeg"],
    accept_multiple_files=True,
    key="file_uploader"
)
# Button to send the message
send_button = st.button("Send", on_click=send_message)
# Display the chat history
for message in st.session_state['chat_history']:
    role, text = message
    st.markdown(f"**{role.title()}**: {text}")
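# To run the app locally (a sketch, assuming this file is saved as app.py):
#   pip install streamlit pillow google-generativeai
#   streamlit run app.py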