import streamlit as st
from PIL import Image
import io
import base64
import google.generativeai as genai
# Configure the API key (should be set as an environment variable or secure storage in production)
api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"
genai.configure(api_key=api_key)
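# A safer pattern is to read the key from the environment rather than the source file;
# a minimal sketch, assuming a GOOGLE_API_KEY environment variable has been set:
#   import os
#   api_key = os.environ["GOOGLE_API_KEY"]
#   genai.configure(api_key=api_key)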
generation_config = {
"temperature": 0.9,
"max_output_tokens": 3000
}
safety_settings = []
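# An empty list keeps the library's default safety behaviour. Thresholds can be adjusted
# per harm category; an illustrative (not exhaustive) sketch using the dict form accepted
# by google.generativeai:
#   safety_settings = [
#       {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"},
#   ]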
# Initialize session state
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
# UI layout
st.title("Gemini Chatbot")
# Display the chat history
for i, message in enumerate(st.session_state.chat_history):
    role, text = message
    # Unique keys prevent Streamlit's duplicate-widget error when identical messages are rendered
    st.text_area(f"{role.title()} says:", value=text, height=75, disabled=True, key=f"history_{i}")
# Text input for the user to send messages
user_input = st.text_input("Enter your message here:")
# File uploader for images
uploaded_files = st.file_uploader("Upload images:", type=["png", "jpg", "jpeg"], accept_multiple_files=True)
# Function to convert image to base64
def get_image_base64(image):
    buffered = io.BytesIO()
    # Convert to RGB first so PNG uploads with an alpha channel can be saved as JPEG
    image.convert("RGB").save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue()).decode()
    # Return the bare base64 string; the API's inline image parts expect the encoded
    # data without a data-URI prefix
    return img_str
# When the 'Send' button is clicked, process the input and generate a response
if st.button("Send"):
    # Save user input to the chat history if it's not empty
    if user_input.strip():
        st.session_state.chat_history.append(("user", user_input))
    # Process and save uploaded images to the chat history
    image_parts = []
    for uploaded_file in uploaded_files:
        bytes_data = uploaded_file.read()
        image = Image.open(io.BytesIO(bytes_data))
        image_base64 = get_image_base64(image)
        image_parts.append({
            "mime_type": "image/jpeg",  # images are re-encoded as JPEG in get_image_base64
            "data": image_base64
        })
        st.session_state.chat_history.append(("user", f"Uploaded image: {uploaded_file.name}"))
    # Combine the text and image parts into a single user message; the Gemini API does not
    # accept consecutive messages with the same role
    parts = []
    if user_input.strip():
        parts.append({"text": user_input})
    parts.extend(image_parts)
    if not parts:
        st.warning("Please enter a message or upload an image before sending.")
        st.stop()
    prompts = [{"role": "user", "parts": parts}]
    # Generate the response
    if image_parts:
        model = genai.GenerativeModel(
            model_name='gemini-pro-vision',
            generation_config=generation_config,
            safety_settings=safety_settings
        )
    else:
        model = genai.GenerativeModel(
            model_name='gemini-pro',
            generation_config=generation_config,
            safety_settings=safety_settings
        )
    response = model.generate_content(prompts)
    # GenerateContentResponse exposes the generated text via the .text property, not item access
    response_text = response.text
    # Save the model response to the chat history
    st.session_state.chat_history.append(("model", response_text))
    # Display the updated chat history
    for i, message in enumerate(st.session_state.chat_history):
        role, text = message
        st.text_area(f"{role.title()} says:", value=text, height=75, disabled=True, key=f"updated_{i}")