Spaces:
Runtime error
Runtime error
File size: 3,387 Bytes
221a628 b557897 221a628 389cdce b557897 221a628 b557897 dfdbfa8 6e074fc 221a628 dfdbfa8 221a628 6e074fc 837873a dfdbfa8 b557897 221a628 00bfc2f 389cdce 00bfc2f dfdbfa8 fd4809b ba4c612 389cdce a76b0fb fd4809b c72bfe4 389cdce c72bfe4 fd4809b c72bfe4 ba4c612 fd4809b ba4c612 389cdce a76b0fb 389cdce fd4809b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 |
import base64
import io
import os

import streamlit as st
from PIL import Image

# Assuming google.generativeai as genai is the correct import based on your description
import google.generativeai as genai
# --- Gemini API configuration ---
# SECURITY: never hard-code API keys in source. The key previously inlined
# here was committed to the repo and should be revoked. Read it from the
# environment (or Streamlit secrets) instead.
api_key = os.environ.get("GOOGLE_API_KEY", "")
genai.configure(api_key=api_key)

# Shared generation settings applied to every model call.
generation_config = genai.GenerationConfig(
    temperature=0.9,        # fairly creative responses
    max_output_tokens=3000,
)
safety_settings = []  # empty list: keep the library's default safety behavior

# Initialize session state for chat history (persists across Streamlit reruns)
if 'chat_history' not in st.session_state:
    st.session_state['chat_history'] = []

# UI layout
st.title("Gemini Chatbot")
# Function to convert image to base64
# Function to convert image to base64
def get_image_base64(image):
    """Encode a PIL image as a base64 string of its JPEG bytes.

    The image is first converted to RGB because JPEG cannot carry an
    alpha channel.
    """
    rgb = image.convert("RGB")
    with io.BytesIO() as buf:
        rgb.save(buf, format="JPEG")
        payload = buf.getvalue()
    return base64.b64encode(payload).decode()
# Function to send message and clear input
# Function to send message and clear input
def send_message():
    """Send-button callback: append the user's text and images to the chat
    history, call the Gemini API, and append the model's reply.

    Reads the text box and file uploader through ``st.session_state`` and
    mutates ``st.session_state['chat_history']`` in place.
    """
    user_input = st.session_state['user_input']
    uploaded_files = st.session_state['file_uploader']
    if not (user_input or uploaded_files):
        return  # nothing to send

    # Save user input to the chat history
    if user_input:
        st.session_state['chat_history'].append(
            {"role": "user", "parts": [{"text": user_input}]}
        )

    # Process uploaded images: each becomes its own user turn with one blob part.
    if uploaded_files:
        for uploaded_file in uploaded_files:
            image = Image.open(uploaded_file).convert("RGB")
            st.session_state['chat_history'].append({
                "role": "user",
                "parts": [{
                    # NOTE(review): blob parts are wrapped in "inline_data"
                    # per the google-generativeai content format — confirm
                    # against the SDK version in use.
                    "inline_data": {
                        # get_image_base64 re-encodes to JPEG, so the mime
                        # type must be image/jpeg regardless of the original
                        # upload type (the old code sent uploaded_file.type,
                        # which mismatched the payload for PNG uploads).
                        "mime_type": "image/jpeg",
                        "data": get_image_base64(image),
                    }
                }],
            })

    # Choose the appropriate model based on the input type
    model_name = 'gemini-pro-vision' if uploaded_files else 'gemini-pro'
    model = genai.GenerativeModel(
        model_name=model_name,
        generation_config=generation_config,
        safety_settings=safety_settings,
    )

    # Generate the response; .text can raise (e.g. safety-blocked replies),
    # so surface failures in the chat instead of crashing the app.
    try:
        response = model.generate_content(st.session_state['chat_history'])
        response_text = response.text if hasattr(response, "text") else "No response text found."
    except Exception as exc:
        response_text = f"Error generating response: {exc}"
    st.session_state['chat_history'].append(
        {"role": "model", "parts": [{"text": response_text}]}
    )

    # Clear the text box for the next message. Do NOT assign to
    # st.session_state.file_uploader: Streamlit forbids setting a
    # file_uploader's value via session state and raises
    # StreamlitAPIException (this was the runtime error in the original).
    st.session_state.user_input = ""
# Multiline text input for the user to send messages
# --- Input widgets ---
# Multiline message box; its contents are read by send_message via
# st.session_state['user_input'].
user_input = st.text_area("Enter your message here:", value="", key="user_input")

# Optional image attachments, also consumed through session state.
uploaded_files = st.file_uploader(
    "Upload images:",
    accept_multiple_files=True,
    type=["png", "jpg", "jpeg"],
    key="file_uploader",
)

# Sending is handled entirely by the on_click callback.
send_button = st.button("Send", on_click=send_message)
# Display the chat history
# Display the chat history, oldest first. (The original final line carried a
# stray trailing "|" scrape artifact that broke the syntax — removed.)
for entry in st.session_state['chat_history']:
    role = entry["role"]
    parts = entry["parts"][0]  # each history entry carries exactly one part
    if 'text' in parts:
        st.markdown(f"**{role.title()}**: {parts['text']}")
    elif 'data' in parts or 'inline_data' in parts:
        # Image parts hold a base64 payload; render a placeholder label.
        st.markdown(f"**{role.title()}**: (Image)")