Spaces: Runtime error

Commit 2abea13 · Parent(s): c87aa9e
Update app.py

app.py CHANGED
@@ -1,30 +1,24 @@
-import os
-import time
-import uuid
-import sqlite3
-from typing import List, Tuple, Optional, Dict, Union
-from PIL import Image
-from io import BytesIO
-
-import google.generativeai as genai
 import streamlit as st
+import google.generativeai as genai
+import sqlite3
+from PIL import Image
 
 # Database setup
 conn = sqlite3.connect('chat_history.db')
 c = conn.cursor()
 
 c.execute('''
     CREATE TABLE IF NOT EXISTS history
     (role TEXT, message TEXT)
 ''')
 
 # Generative AI setup
 api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"
 genai.configure(api_key=api_key)
 
 generation_config = {
     "temperature": 0.9,
     "max_output_tokens": 3000
 }
 
 safety_settings = []
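Note on the setup hunk: the commit pins a literal API key into app.py, where it persists in the repo history. A minimal sketch of the usual alternative, assuming the key is instead stored as a Space secret exposed to the app as an environment variable (the name GOOGLE_API_KEY is an assumption, not something this repo defines):

    import os

    import google.generativeai as genai

    # Assumes a GOOGLE_API_KEY secret/environment variable; keeping the key
    # out of app.py avoids leaking it through the commit history.
    api_key = os.environ.get("GOOGLE_API_KEY")
    if not api_key:
        raise RuntimeError("GOOGLE_API_KEY is not set")
    genai.configure(api_key=api_key)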
@@ -80,23 +74,18 @@ model_name = st.sidebar.selectbox(
     help="Gemini Pro is a text-only model that can generate natural language responses based on the chat history. Gemini Pro Vision is a multimodal model that can generate natural language responses based on the chat history and the uploaded images."
 )
 
-# Initialize
-if "user_input" not in st.session_state:
-    st.session_state["user_input"] = ""
-
-# Chat history
-st.title("Chatbot")
+# Initialize chat_history in session state
 if "chat_history" not in st.session_state:
     st.session_state["chat_history"] = []
 
+# Display chat history
+st.title("Chatbot")
 for message in st.session_state["chat_history"]:
     r, t = message["role"], message["parts"][0]["text"]
     st.markdown(f"**{r.title()}:** {t}")
 
 # User input
-def update_user_input():
-    st.session_state["user_input"] = st.session_state.user_input
-user_input = st.text_area("", height=5, key="user_input", on_change=update_user_input)
+user_input = st.text_area("", height=5, key="user_input")
 
 # File uploader
 uploaded_files = st.file_uploader("Upload images here or paste screenshots", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key="uploaded_files")
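The deleted update_user_input callback only copied the text_area's value onto its own session-state key, which Streamlit already does for any widget created with a key, so dropping it loses nothing. A minimal sketch of that built-in behavior:

    import streamlit as st

    # A keyed widget's value is mirrored into st.session_state on every
    # rerun; no on_change callback is needed just to store it.
    user_input = st.text_area("Your message", key="user_input")
    st.write(st.session_state["user_input"] == user_input)  # always True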
@@ -107,27 +96,44 @@ if uploaded_files:
         image = Image.open(uploaded_file)
         st.image(image)
 
-# Run button
-# Run button
-run_button = st.button("Run")
-
 # Clear button
-clear_button = st.button("Clear")
+clear_button = st.button("Clear", key="clear_button")
 
 # Download button
-download_button = st.button("Download")
+download_button = st.button("Download", key="download_button")
 
 # Progress bar
 progress_bar = st.progress(0)
 
+# Footer
+st.markdown("""
+<style>
+.footer {
+    position: fixed;
+    left: 0;
+    bottom: 0;
+    width: 100%;
+    background-color: #f9a01b;
+    color: white;
+    text-align: center;
+}
+</style>
+<div class="footer">
+<p>Made with Streamlit and Google Generative AI</p>
+</div>
+""", unsafe_allow_html=True)
+
 # Clear chat history and image uploader
 if clear_button:
     st.session_state["chat_history"] = []
     # Update progress bar
     progress_bar.progress(1)
+
+# Handle user input
+if user_input:
+    # Add user input to chat history
+    st.session_state["chat_history"].append({"role": "user", "parts": [{"text": user_input}]})
 
-# Handle run_button click
-if run_button:
     # Create a GenerationConfig instance
     generation_config = genai.GenerationConfig(
         temperature=temperature,
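This hunk ends mid-statement; the genai.GenerationConfig construction it starts continues in the next hunk. As a standalone sketch, with the values taken from the generation_config dict at the top of the file (the app itself passes temperature from a sidebar slider that sits outside this diff):

    import google.generativeai as genai

    # Typed equivalent of the {"temperature": 0.9, "max_output_tokens": 3000}
    # dict defined earlier in app.py.
    generation_config = genai.GenerationConfig(
        temperature=0.9,
        max_output_tokens=3000,
    )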
@@ -139,7 +145,7 @@ if run_button:
     if model_name == "gemini-pro":
         model = genai.GenerativeModel('gemini-pro')
         response = model.generate_content(
-            contents=[
+            contents=[user_input],
             generation_config=generation_config
         )
     elif model_name == "gemini-pro-vision":
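One thing to watch in both generate_content branches: the call returns a GenerateContentResponse object, and the generated string lives on its .text attribute, so the line below that appends response into chat_history as a "text" part stores the object rather than the text. A minimal sketch of the text-only call, assuming genai is already configured:

    import google.generativeai as genai

    model = genai.GenerativeModel('gemini-pro')
    response = model.generate_content(
        contents=["Hello"],
        generation_config=genai.GenerationConfig(temperature=0.9),
    )
    print(response.text)  # the string is on .text, not on response itself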
@@ -147,13 +153,18 @@ if run_button:
         image_prompts = [{'mime_type': 'image/png', 'data': image.tobytes()} for image in images]
         model = genai.GenerativeModel('gemini-pro-vision')
         response = model.generate_content(
-            contents=[
+            contents=[user_input] + image_prompts,
             generation_config=generation_config
         )
 
     # Add model response to chat history
     st.session_state["chat_history"].append({"role": "model", "parts": [{"text": response}]})
 
+    # Display chat history
+    for message in st.session_state["chat_history"]:
+        r, t = message["role"], message["parts"][0]["text"]
+        st.markdown(f"**{r.title()}:** {t}")
+
     # Save chat history to database
     for message in st.session_state["chat_history"]:
         if len(st.session_state["chat_history"]) % 2 == 0:
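Image.tobytes() returns raw decoded pixel data, not an encoded PNG stream, so the 'image/png' mime type in image_prompts does not match its payload; encoding through an in-memory buffer would (and would need the io.BytesIO import this commit removes). A hedged sketch; to_png_part is a hypothetical helper, not part of the app:

    from io import BytesIO

    from PIL import Image

    # Encode a PIL image to real PNG bytes so the declared mime type is true.
    def to_png_part(image: Image.Image) -> dict:
        buf = BytesIO()
        image.save(buf, format="PNG")
        return {"mime_type": "image/png", "data": buf.getvalue()}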
@@ -165,26 +176,5 @@ if run_button:
     conn.commit()
 
     # Clear user input
-    st.session_state
-
-    # Rerun the app
-    st.experimental_rerun()
-
-# Footer
-st.markdown("""
-<style>
-.footer {
-    position: fixed;
-    left: 0;
-    bottom: 0;
-    width: 100%;
-    background-color: #f9a01b;
-    color: white;
-    text-align: center;
-}
-</style>
-<div class="footer">
-<p>Made with Streamlit and Google Generative AI</p>
-</div>
-""", unsafe_allow_html=True)
+    st.session_state.user_input = ""
 
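The replacement line still writes to st.session_state.user_input after the text_area with that key has been rendered, which Streamlit rejects with a StreamlitAPIException at runtime and which may well be what keeps this Space in the "Runtime error" state. A sketch of the usual callback workaround, since callbacks run before the script re-executes (handle_send and last_message are hypothetical names):

    import streamlit as st

    # Widget-state writes are allowed inside callbacks, which run before
    # the next script pass, so the text_area can be cleared safely here.
    def handle_send():
        st.session_state["last_message"] = st.session_state["user_input"]
        st.session_state["user_input"] = ""

    st.text_area("Your message", key="user_input")
    st.button("Send", on_click=handle_send)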