import os
import sqlite3
from PIL import Image
import google.generativeai as genai
import streamlit as st
# Database setup
# check_same_thread=False lets Streamlit reuse this connection across script reruns.
conn = sqlite3.connect('chat_history.db', check_same_thread=False)
c = conn.cursor()
c.execute('''
    CREATE TABLE IF NOT EXISTS history
    (role TEXT, message TEXT)
''')
conn.commit()
# Generative AI setup
# Read the key from the environment instead of hard-coding it in the source.
api_key = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=api_key)
safety_settings = []
# Streamlit UI
st.set_page_config(page_title="Chatbot", page_icon="🤖")
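# Guard (assumption, not in the original script): stop with a readable message when no API
# key is configured, rather than letting the first genai call fail at runtime. The
# GOOGLE_API_KEY variable name follows the google.generativeai convention.
if not api_key:
    st.error("Set the GOOGLE_API_KEY environment variable before running this app.")
    st.stop()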
# Header with logo
st.markdown("""
<style>
.container {
    display: flex;
}
.logo-text {
    font-weight:700 !important;
    font-size:50px !important;
    color: #f9a01b !important;
    padding-top: 75px !important;
}
.logo-img {
    float:right;
}
</style>
<div class="container">
    <p class="logo-text">Chatbot</p>
    <img class="logo-img" src="https://media.roboflow.com/spaces/gemini-icon.png" width=120 height=120>
</div>
""", unsafe_allow_html=True)
# Sidebar for parameters and model selection
st.sidebar.title("Parameters")
temperature = st.sidebar.slider(
    "Temperature",
    min_value=0.0,
    max_value=1.0,
    value=0.9,
    step=0.01,
    help="Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that expect a true or correct response, while higher temperatures can lead to more diverse or unexpected results."
)
max_output_tokens = st.sidebar.slider(
    "Token limit",
    min_value=1,
    max_value=2048,
    value=2048,
    step=1,
    help="Token limit determines the maximum amount of text output from one prompt. A token is approximately four characters. The default value is 2048."
)
st.sidebar.title("Model") | |
model_name = st.sidebar.selectbox( | |
"Select a model", | |
options=["gemini-pro", "gemini-pro-vision"], | |
index=0, | |
help="Gemini Pro is a text-only model that can generate natural language responses based on the chat history. Gemini Pro Vision is a multimodal model that can generate natural language responses based on the chat history and the uploaded images." | |
) | |
# Initialize user_input in session state
if "user_input" not in st.session_state:
    st.session_state["user_input"] = ""
# Chat history
st.title("Chatbot")
if "chat_history" not in st.session_state:
    st.session_state["chat_history"] = []
for message in st.session_state["chat_history"]:
    r, t = message["role"], message["parts"][0]["text"]
    st.markdown(f"**{r.title()}:** {t}")
# User input (Streamlit requires a non-empty label and a minimum text-area height)
user_input = st.text_area("Your message", height=100, key="user_input", label_visibility="collapsed")
# File uploader
uploaded_files = st.file_uploader("Upload images here or paste screenshots", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key="uploaded_files")
# If files are uploaded, open and display them
if uploaded_files:
    for uploaded_file in uploaded_files:
        image = Image.open(uploaded_file)
        st.image(image)
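# Note (assumption, not in the original script): gemini-pro-vision rejects requests that
# contain no image, so warn early when it is selected without an upload.
if model_name == "gemini-pro-vision" and not uploaded_files:
    st.warning("Upload at least one image to use gemini-pro-vision.")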
# Run button
run_button = st.button("Run", key="run_button")
# Clear button
clear_button = st.button("Clear", key="clear_button")
# Download button
download_button = st.button("Download", key="download_button")
# Progress bar
progress_bar = st.progress(0)
# Footer
st.markdown("""
<style>
.footer {
    position: fixed;
    left: 0;
    bottom: 0;
    width: 100%;
    background-color: #f9a01b;
    color: white;
    text-align: center;
}
</style>
<div class="footer">
    <p>Made with Streamlit and Google Generative AI</p>
</div>
""", unsafe_allow_html=True)
# Clear chat history and image uploader
if clear_button:
    st.session_state["chat_history"] = []
    # Reset the progress bar
    progress_bar.progress(0)
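    # Optional addition (assumption, not in the original script): also delete the persisted
    # rows so a cleared conversation is not reloaded from the database in a later session.
    c.execute("DELETE FROM history")
    conn.commit()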
# Generate model response
if run_button and st.session_state["user_input"].strip():
    user_text = st.session_state["user_input"]
    # Record the user's turn in the chat history
    st.session_state["chat_history"].append({"role": "user", "parts": [{"text": user_text}]})
    # Build the selected model with the sidebar parameters
    model = genai.GenerativeModel(
        model_name=model_name,
        generation_config={"temperature": temperature, "max_output_tokens": max_output_tokens},
        safety_settings=safety_settings,
    )
    if model_name == "gemini-pro":
        response = model.generate_content(user_text)
    elif model_name == "gemini-pro-vision":
        # The vision model expects the prompt text followed by the uploaded images
        images = [Image.open(file).convert('RGB') for file in uploaded_files]
        response = model.generate_content([user_text] + images)
    response_text = response.text
    # Add model response to chat history
    st.session_state["chat_history"].append({"role": "model", "parts": [{"text": response_text}]})
    # Save both turns of the exchange to the database
    c.execute("INSERT INTO history VALUES (?, ?)", ("user", user_text))
    c.execute("INSERT INTO history VALUES (?, ?)", ("model", response_text))
    conn.commit()
    # Rerun the app so the new messages are rendered in the history above
    st.rerun()
# Save chat history to a text file
if download_button:
    chat_text = "\n".join(
        f"{m['role'].title()}: {m['parts'][0]['text']}" for m in st.session_state["chat_history"]
    )
    st.download_button(
        label="Download chat history",
        data=chat_text,
        file_name="chat_history.txt",
        mime="text/plain"
    )