import os
import io
import base64

import streamlit as st
from PIL import Image
import google.generativeai as genai

# Configure the API key (read it from the environment; never hard-code secrets)
api_key = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=api_key)
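# Assumed setup: export GOOGLE_API_KEY="your-key" before launching with `streamlit run app.py`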

# Generation parameters passed to the model
generation_config = {
    "temperature": 0.9,
    "max_output_tokens": 3000
}

# An empty list keeps the library's default safety settings
safety_settings = []
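# Example override (format assumed from the google.generativeai safety-settings docs), left disabled:
# safety_settings = [
#     {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_ONLY_HIGH"},
# ]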
# Initialize session state
if "chat_history" not in st.session_state:
st.session_state.chat_history = []
# UI layout
st.title("Gemini Chatbot")
# Display the chat history
for i, message in enumerate(st.session_state.chat_history):
    role, text = message
    # A unique key avoids Streamlit's duplicate-widget error when the same message is rendered twice
    st.text_area(f"{role.title()} says:", value=text, height=75, disabled=True, key=f"history_{i}")
# Text input for the user to send messages
user_input = st.text_input("Enter your message here:")
# File uploader for images
uploaded_files = st.file_uploader("Upload images:", type=["png", "jpg", "jpeg"], accept_multiple_files=True)
# Function to convert an image to a base64-encoded JPEG string
def get_image_base64(image):
    # Convert to RGB so PNGs with an alpha channel can be re-encoded as JPEG
    image = image.convert("RGB")
    buffered = io.BytesIO()
    image.save(buffered, format="JPEG")
    # Return the bare base64 string; the Gemini API does not expect a data-URI prefix
    return base64.b64encode(buffered.getvalue()).decode()
# When the 'Send' button is clicked, process the input and generate a response
if st.button("Send"):
    # Save user input to the chat history if it's not empty
    if user_input.strip():
        st.session_state.chat_history.append(("user", user_input))

    # Process and save uploaded images to the chat history
    image_parts = []
    for uploaded_file in uploaded_files:
        bytes_data = uploaded_file.read()
        image = Image.open(io.BytesIO(bytes_data))
        image_base64 = get_image_base64(image)
        image_parts.append({
            # The helper re-encodes everything as JPEG, so report that mime type
            "mime_type": "image/jpeg",
            "data": image_base64
        })
        st.session_state.chat_history.append(("user", f"Uploaded image: {uploaded_file.name}"))
    # Combine the text and any image parts into a single user turn
    # (the vision model expects one user message, not a multi-turn conversation)
    parts = []
    if user_input.strip():
        parts.append({"text": user_input})
    parts.extend(image_parts)

    if parts:
        prompts = [{"role": "user", "parts": parts}]

        # Use the vision model when images are attached, otherwise the text-only model
        if image_parts:
            model = genai.GenerativeModel(
                model_name='gemini-pro-vision',
                generation_config=generation_config,
                safety_settings=safety_settings
            )
        else:
            model = genai.GenerativeModel(
                model_name='gemini-pro',
                generation_config=generation_config,
                safety_settings=safety_settings
            )

        # Generate the response and save it to the chat history
        response = model.generate_content(prompts)
        st.session_state.chat_history.append(("model", response.text))
    # Display the updated chat history
    for i, message in enumerate(st.session_state.chat_history):
        role, text = message
        st.text_area(f"{role.title()} says:", value=text, height=75, disabled=True, key=f"updated_{i}")