ziyadsuper2017 committed on
Commit
c1bab7b
·
1 Parent(s): bd1d195

Update app.py

Browse files

Reverted the code to the previous version, without the reset button. Trying to make the text box multiline.

Files changed (1) hide show
  1. app.py +49 -56
app.py CHANGED
@@ -1,24 +1,24 @@
1
  import streamlit as st
2
- import google.generativeai as genai
3
- import sqlite3
4
  from streamlit import file_uploader
5
 
6
  # Database setup
7
- conn = sqlite3.connect('chat_history.db')
8
- c = conn.cursor()
9
 
10
  c.execute('''
11
- CREATE TABLE IF NOT EXISTS history
12
  (role TEXT, message TEXT)
13
  ''')
14
 
15
  # Generative AI setup
16
- api_key = "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM"
17
  genai.configure(api_key=api_key)
18
 
19
  generation_config = {
20
  "temperature": 0.9,
21
- "max_output_tokens": 500
22
  }
23
 
24
  safety_settings = []
@@ -26,7 +26,7 @@ safety_settings = []
26
  # Streamlit UI
27
  st.title("Chatbot")
28
 
29
- chat_history = st.session_state.get("chat_history", [])
30
 
31
  if len(chat_history) % 2 == 0:
32
  role = "user"
@@ -34,18 +34,19 @@ else:
34
  role = "model"
35
 
36
  for message in chat_history:
37
- r, t = message["role"], message["parts"][0]["text"]
38
  st.markdown(f"**{r.title()}:** {t}")
39
 
40
- user_input = st.text_input("")
 
41
  if user_input:
42
- chat_history.append({"role": role, "parts": [{"text": user_input}]})
43
  if role == "user":
44
 
45
- # If only text is entered, follow the previous code
46
  model_name = "gemini-pro"
47
  model = genai.GenerativeModel(
48
- model_name=model_name,
49
  generation_config=generation_config,
50
  safety_settings=safety_settings
51
  )
@@ -57,57 +58,49 @@ if user_input:
57
  st.session_state["chat_history"] = chat_history
58
 
59
  for message in chat_history:
60
- r, t = message["role"], message["parts"][0]["text"]
61
  st.markdown(f"**{r.title()}:** {t}")
 
 
 
62
 
63
-
64
- # Create a placeholder for the history display
65
- history_placeholder = st.empty()
66
-
67
- # Use the key argument for the buttons
68
- if st.button("Display History", key="display"):
69
- c.execute("SELECT * FROM history")
70
- rows = c.fetchall()
71
-
72
- # Fill the placeholder with the history
73
- for row in rows:
74
- history_placeholder.markdown(f"**{row[0].title()}:** {row[1]}")
75
 
76
- # Define a function to clear the placeholder
77
- def clear_history():
78
- history_placeholder.empty()
79
-
80
- # Use the key argument for the buttons
81
- if st.button("Reset History", key="reset", on_click=clear_history):
82
- pass
83
 
84
  conn.close()
85
 
86
  # Separate section for image uploading
87
- st.title("Image Description Generator")
88
- uploaded_file = st.file_uploader("Upload an image here", type=["png", "jpg", "jpeg"])
89
 
90
- # Text input for asking questions about the image:
 
 
91
  image_question = st.text_input("Ask something about the image:")
92
 
93
  if uploaded_file and image_question:
94
- image_parts = [
95
- {
96
- "mime_type": uploaded_file.type,
97
- "data": uploaded_file.read()
98
- },
99
- ]
100
-
101
- prompt_parts = [
102
- image_question,
103
- image_parts[0],
104
- ]
105
-
106
- model = genai.GenerativeModel(
107
- model_name="gemini-pro-vision",
108
- generation_config=generation_config,
109
- safety_settings=safety_settings
110
- )
111
-
112
- response = model.generate_content(prompt_parts)
113
- st.markdown(f"**Model's answer:** {response.text}")
 
import os
import sqlite3

import streamlit as st
import google.generativeai as genai
from streamlit import file_uploader

# Database setup: one connection for the whole script run.
conn = sqlite3.connect('chat_history.db')
c = conn.cursor()

c.execute('''
CREATE TABLE IF NOT EXISTS history
(role TEXT, message TEXT)
''')

# Generative AI setup
# SECURITY: an API key was committed here in plain text — it is public and
# must be rotated. Prefer the GOOGLE_API_KEY environment variable (or
# st.secrets); the literal is kept only as a backward-compatible fallback.
api_key = os.environ.get("GOOGLE_API_KEY", "AIzaSyC70u1sN87IkoxOoIj4XCAPw97ae2LZwNM")
genai.configure(api_key=api_key)

generation_config = {
    "temperature": 0.9,        # fairly creative sampling
    "max_output_tokens": 500   # cap the reply length
}

# Empty list: keep the API's default safety filtering.
safety_settings = []
 
# Streamlit UI
st.title("Chatbot")

# Pull the running conversation out of session state (empty on first run).
chat_history = st.session_state.get("chat_history", [])

# Turns alternate: an even message count means the user speaks next.
role = "user" if len(chat_history) % 2 == 0 else "model"

# Render the conversation so far.
for entry in chat_history:
    speaker, text = entry["role"], entry["parts"][0]["text"]
    st.markdown(f"**{speaker.title()}:** {text}")
 
# Use text_area for multiline input (st.text_input is single-line).
# BUG FIX: Streamlit's text_area `height` is in pixels with a documented
# minimum of 68; height=5 is below it (newer Streamlit versions reject it),
# so use the minimum valid height instead.
user_input = st.text_area("", height=68)
if user_input:
    # Record the new message under the role computed from history parity.
    chat_history.append({"role": role, "parts": [{"text": user_input}]})
    if role == "user":

        # Model code
        model_name = "gemini-pro"
        model = genai.GenerativeModel(
            model_name=model_name,
            generation_config=generation_config,
            safety_settings=safety_settings
        )
 
# NOTE(review): the diff collapses original file lines 53-57 just above
# (presumably the model call and appending its reply) and the scrape lost
# all indentation — these statements most likely sit inside the
# `if user_input:` block; confirm against the full file.
# Persist the updated conversation for the next Streamlit rerun.
st.session_state["chat_history"] = chat_history

# Re-render the conversation after the new exchange.
# NOTE(review): messages were already rendered above, so this appears to
# display them twice — confirm intended.
for message in chat_history:
    r, t = message["role"], message["parts"][0]["text"]
    st.markdown(f"**{r.title()}:** {t}")
# On demand, dump every row previously saved to the sqlite history table.
if st.button("Display History"):
    c.execute("SELECT * FROM history")
    rows = c.fetchall()

    for row in rows:
        st.markdown(f"**{row[0].title()}:** {row[1]}")
 
 
 
 
 
 
 
 
 
 
# Save chat history to database
# NOTE(review): this re-inserts the entire in-memory history on every
# Streamlit rerun, so rows accumulate duplicates across reruns — consider
# inserting only messages not yet stored. Behavior kept as-is; the manual
# loop is just batched through executemany.
c.executemany(
    "INSERT INTO history VALUES (?, ?)",
    [(message["role"], message["parts"][0]["text"]) for message in chat_history],
)
conn.commit()

conn.close()
77
 
# Separate section for image uploading
st.title("Image Description Generator")

uploaded_file = st.file_uploader("Upload an image here", type=["png", "jpg", "jpeg"])

# Text input for asking questions about the image
image_question = st.text_input("Ask something about the image:")

if uploaded_file and image_question:
    # Package the uploaded image as an inline blob for the vision model.
    image_blob = {
        "mime_type": uploaded_file.type,
        "data": uploaded_file.read()
    }

    # Prompt is the user's question followed by the image itself.
    prompt_parts = [
        image_question,
        image_blob,
    ]

    vision_model = genai.GenerativeModel(
        model_name="gemini-pro-vision",
        generation_config=generation_config,
        safety_settings=safety_settings
    )

    response = vision_model.generate_content(prompt_parts)
    st.markdown(f"**Model's answer:** {response.text}")