import os
import sqlite3

import streamlit as st
import google.generativeai as genai
from PIL import Image
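# Assumed dependencies: streamlit, google-generativeai, and Pillow
# (pip install streamlit google-generativeai pillow); start the app with
# `streamlit run app.py`.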
# Database setup: one row per chat message, stored as (role, message)
conn = sqlite3.connect('chat_history.db')
c = conn.cursor()
c.execute('''
    CREATE TABLE IF NOT EXISTS history
    (role TEXT, message TEXT)
''')
conn.commit()
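# The table is append-only in this app. A sketch of how saved turns could be
# reloaded into the session on startup (not part of the original flow):
#
#   rows = c.execute("SELECT role, message FROM history").fetchall()
#   st.session_state["chat_history"] = [
#       {"role": role, "parts": [{"text": text}]} for role, text in rows
#   ]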
# Generative AI setup: read the API key from the environment
# (set GOOGLE_API_KEY before launching the app)
api_key = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=api_key)
# An empty list leaves the API's default safety settings in place
safety_settings = []
# Streamlit UI
st.set_page_config(page_title="Chatbot", page_icon="🤖")
# Header with logo
st.markdown("""
<style>
.container {
    display: flex;
}
.logo-text {
    font-weight: 700 !important;
    font-size: 50px !important;
    color: #f9a01b !important;
    padding-top: 75px !important;
}
.logo-img {
    float: right;
}
</style>
<div class="container">
    <p class="logo-text">Chatbot</p>
    <img class="logo-img" src="https://media.roboflow.com/spaces/gemini-icon.png" width=120 height=120>
</div>
""", unsafe_allow_html=True)
# Sidebar for parameters and model selection
st.sidebar.title("Parameters")
temperature = st.sidebar.slider(
    "Temperature",
    min_value=0.0,
    max_value=1.0,
    value=0.9,
    step=0.01,
    help="Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that expect a true or correct response, while higher temperatures can lead to more diverse or unexpected results."
)
max_output_tokens = st.sidebar.slider(
    "Token limit",
    min_value=1,
    max_value=2048,
    value=2048,
    step=1,
    help="Token limit determines the maximum amount of text output from one prompt. A token is approximately four characters. The default value is 2048."
)
st.sidebar.title("Model") | |
model_name = st.sidebar.selectbox( | |
"Select a model", | |
options=["gemini-pro", "gemini-pro-vision"], | |
index=0, | |
help="Gemini Pro is a text-only model that can generate natural language responses based on the chat history. Gemini Pro Vision is a multimodal model that can generate natural language responses based on the chat history and the uploaded images." | |
) | |
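# Note: gemini-pro-vision expects at least one image in each request, so an
# image should be uploaded below before sending a prompt with that model.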
# Initialize chat history and the input box in session state
if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []
if "user_input" not in st.session_state:
    st.session_state["user_input"] = ""
# If the last turn was a model response, clear the input box now;
# a widget's session-state key cannot be reassigned after the widget renders
if st.session_state["chat_history"] and st.session_state["chat_history"][-1]["role"] == "model":
    st.session_state["user_input"] = ""
# Display chat history
st.title("Chatbot")
for message in st.session_state["chat_history"]:
    r, t = message["role"], message["parts"][0]["text"]
    st.markdown(f"**{r.title()}:** {t}")
# User input (the key binds the widget to session state, so no value= is needed)
user_input = st.text_area("Your message", height=100, key="user_input")
# File uploader
uploaded_files = st.file_uploader("Upload images here or paste screenshots", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key="uploaded_files")
# If files are uploaded, open and display them
if uploaded_files:
    for uploaded_file in uploaded_files:
        image = Image.open(uploaded_file)
        st.image(image)
# Clear button
clear_button = st.button("Clear", key="clear_button")
# Download button: export the transcript saved so far as plain text
transcript = "\n".join(
    f"{m['role']}: {m['parts'][0]['text']}" for m in st.session_state["chat_history"]
)
st.download_button("Download", data=transcript, file_name="chat_history.txt", key="download_button")
# Progress bar
progress_bar = st.progress(0)
# Footer
st.markdown("""
<style>
.footer {
    position: fixed;
    left: 0;
    bottom: 0;
    width: 100%;
    background-color: #f9a01b;
    color: white;
    text-align: center;
}
</style>
<div class="footer">
    <p>Made with Streamlit and Google Generative AI</p>
</div>
""", unsafe_allow_html=True)
# Clear chat history (st.progress takes ints from 0 to 100)
if clear_button:
    st.session_state["chat_history"] = []
    progress_bar.progress(100)
# Handle user input
if user_input:
    # Add user input to chat history
    st.session_state["chat_history"].append({"role": "user", "parts": [{"text": user_input}]})
    # Build the generation config from the sidebar values
    generation_config = genai.GenerationConfig(
        temperature=temperature,
        max_output_tokens=max_output_tokens,
        # add other settings here if needed
    )
    # Generate model response
    reply = None
    try:
        if model_name == "gemini-pro":
            model = genai.GenerativeModel('gemini-pro')
            response = model.generate_content(
                contents=[user_input],
                generation_config=generation_config,
                safety_settings=safety_settings
            )
        elif model_name == "gemini-pro-vision":
            # The SDK accepts PIL Image objects directly; Image.tobytes()
            # yields raw pixel data, which is not a valid image/png payload
            images = [Image.open(file) for file in uploaded_files]
            model = genai.GenerativeModel('gemini-pro-vision')
            response = model.generate_content(
                contents=[user_input] + images,
                generation_config=generation_config,
                safety_settings=safety_settings
            )
        reply = response.text
    except Exception as e:
        st.write(f"An error occurred: {e}")
    # Add model response to chat history
    if reply is not None:
        st.session_state["chat_history"].append({"role": "model", "parts": [{"text": reply}]})
    # Display only the new exchange (earlier turns were rendered above)
    new_turns = 2 if reply is not None else 1
    for message in st.session_state["chat_history"][-new_turns:]:
        r, t = message["role"], message["parts"][0]["text"]
        st.markdown(f"**{r.title()}:** {t}")
    # Save only the new messages to the database, using each message's own role
    for message in st.session_state["chat_history"][-new_turns:]:
        text = str(message["parts"][0]["text"])  # ensure the text is a string
        c.execute("INSERT INTO history VALUES (?, ?)", (message["role"], text))
    conn.commit()
    # The input box is cleared at the top of the next rerun; assigning to
    # st.session_state.user_input here would raise a StreamlitAPIException