Spaces: Runtime error
Commit · 6335d32
1 Parent(s): 4fa0fd6
Update app.py
app.py CHANGED
@@ -1,17 +1,7 @@
import streamlit as st
import google.generativeai as genai
-import sqlite3
from streamlit import file_uploader

-# Database setup
-conn = sqlite3.connect('chat_history.db')
-c = conn.cursor()
-
-c.execute('''
-    CREATE TABLE IF NOT EXISTS history
-    (role TEXT, message TEXT)
-''')
-
# Generative AI setup
api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"
genai.configure(api_key=api_key)
@@ -26,61 +16,35 @@ safety_settings = []
# Streamlit UI
st.title("Chatbot")

-
+# Use text_input for text input by typing
+user_text = st.text_input("Type your text here:")

-
-
-else:
-    role = "model"
+# Use file_uploader for image input
+user_image = st.file_uploader("Upload an image file here", type=["png", "jpg", "jpeg"])

-
-
-
-
-
-user_input = st.file_uploader("Upload a text or an image file here", type=["txt", "png", "jpg", "jpeg"])
-if user_input:
-    # Check the file type and create the prompt parts accordingly
-    if user_input.type == "text/plain":
-        prompt_parts = [user_input.getvalue().decode("utf-8")]
+# Check if the user has entered text or uploaded an image
+if user_text or user_image:
+    # Create the prompt parts accordingly
+    if user_text:
+        prompt_parts = [user_text]
        model_name = "gemini-pro"  # Use the text-only model
    else:
        prompt_parts = [{
-            "mime_type":
-            "data":
+            "mime_type": user_image.type,
+            "data": user_image.read()
        }]
        model_name = "gemini-pro-vision"  # Use the multimodal model

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    st.session_state["chat_history"] = chat_history
-
-    for message in chat_history:
-        r, t = message["role"], message["parts"][0]["text"]
-        st.markdown(f"**{r.title()}:** {t}")
-if st.button("Display History"):
-    c.execute("SELECT * FROM history")
-    rows = c.fetchall()
-
-    for row in rows:
-        st.markdown(f"**{row[0].title()}:** {row[1]}")
-
-# Save chat history to database
-for message in chat_history:
-    c.execute("INSERT INTO history VALUES (?, ?)",
-              (message["role"], message["parts"][0]["text"]))
-conn.commit()
-
-conn.close()
+    # Model code
+    model = genai.GenerativeModel(
+        model_name=model_name,
+        generation_config=generation_config,
+        safety_settings=safety_settings
+    )
+
+    response = model.generate_content(prompt_parts)
+    response_text = response.text
+
+    # Display the user input and the model response
+    st.markdown(f"**User:** {prompt_parts[0]['text']}")
+    st.markdown(f"**Model:** {response_text}")
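The commit keeps the API key hardcoded in api_key. If the Space is meant to load it at runtime instead, one common pattern (not part of this commit) is to read it from a Space secret exposed as an environment variable; GOOGLE_API_KEY below is an assumed name.

import os

import google.generativeai as genai

# Assumes a secret named GOOGLE_API_KEY has been added in the Space settings;
# Hugging Face exposes Space secrets to the running app as environment variables.
api_key = os.environ.get("GOOGLE_API_KEY")
genai.configure(api_key=api_key)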